blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
76fa13ebb809c0d96b5283163e49de9cd30ee7b8 | a1a934e69050884560074a633dfe33d21a8acfcb | /examples/scikit-learn/wine-quality/pyfunc_predict.py | 7c172de9a2b0fc2d98a8ae608c1cb2eceea55122 | [] | no_license | SkilledMinds/mlflow-fun | 977a8bf0e052e72f2b98ee8a17ed017034e6a9a2 | 3caa0e5f61739357733cc165338c1d5a3c93f456 | refs/heads/master | 2020-04-29T00:56:28.995924 | 2019-03-13T23:57:14 | 2019-03-13T23:57:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 863 | py |
# Serve predictions with mlflow.pyfunc.load_pyfunc()
from __future__ import print_function
import sys
import mlflow
import mlflow.pyfunc
import mlflow.tracking
import util
if __name__ == "__main__":
if len(sys.argv) < 1:
println("ERROR: Expecting RUN_ID PREDICTION_FILE")
sys.exit(1)
print("MLflow Version:", mlflow.version.VERSION)
run_id = sys.argv[1]
data_path = sys.argv[2] if len(sys.argv) > 2 else "wine-quality.csv"
print("data_path:",data_path)
print("run_id:",run_id)
client = mlflow.tracking.MlflowClient()
model_uri = client.get_run(run_id).info.artifact_uri + "/model"
print("model_uri:",model_uri)
model = mlflow.pyfunc.load_pyfunc(model_uri)
print("model:",model)
df = util.read_prediction_data(data_path)
predictions = model.predict(df)
print("predictions:",predictions)
| [
"amesar@users.noreply.github.co"
] | amesar@users.noreply.github.co |
af7495869c9a3fb0198ffbf102eb36b70a3ba9c1 | 52efcaacf23e2345d09a1de61610a74df457057f | /auto_derby/scenes/single_mode/aoharu_battle_confirm.py | 435e426f1990e5313b1c92abcfc2746f849e3917 | [
"MIT"
] | permissive | debi-derby/auto-derby | 78bc726e8243c8a25ddc13b364b7289f322caaaa | c2e5c138125cac6dc13dbd74045161ca03f6e5cf | refs/heads/master | 2023-09-03T09:03:35.305321 | 2021-11-02T16:18:45 | 2021-11-02T16:18:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,371 | py | # -*- coding=UTF-8 -*-
# pyright: strict
from __future__ import annotations
from PIL.Image import Image
from auto_derby import constants, template
from typing import Any, Dict, Iterator, Text, Tuple
from ... import action, templates, mathtools
from ...scenes import Scene
from ..scene import Scene, SceneHolder
def _recognize_predictions(
    screenshot: Image,
) -> Iterator[Tuple[constants.RaceType, constants.RacePrediction]]:
    """Yield (race type, prediction mark) pairs read off the battle-confirm screen.

    Each race-type column is cropped at a fixed bounding box (coordinates are
    given for a 540px-wide reference and rescaled via ResizeProxy), then each
    crop is template-matched against the known prediction-mark images.
    """
    rp = mathtools.ResizeProxy(screenshot.width)
    # (race type, bounding box) for the five prediction columns.
    bbox_list = (
        (constants.RaceType.SPRINT, rp.vector4((31, 505, 113, 533), 540)),
        (constants.RaceType.MILE, rp.vector4((136, 505, 199, 533), 540)),
        (constants.RaceType.INTERMEDIATE, rp.vector4((230, 505, 309, 533), 540)),
        (constants.RaceType.LONG, rp.vector4((331, 505, 405, 533), 540)),
        (constants.RaceType.DART, rp.vector4((429, 505, 505, 533), 540)),
    )
    # (prediction value, template image) pairs; note the TANNANA triangle
    # template is still missing (see TODO below).
    predition_templates = (
        (constants.RacePrediction.HONNMEI, templates.PREDICTION_DOUBLE_CIRCLE),
        (constants.RacePrediction.TAIKOU, templates.PREDICTION_CIRCLE_OUTLINE),
        # TODO: add template for this
        # (constants.RacePrediction.TANNANA, templates.PREDICTION_TRIANGLE),
        (constants.RacePrediction.RENNSHITA, templates.PREDICTION_TRIANGLE_OUTLINE),
    )
    for t, bbox in bbox_list:
        img = screenshot.crop(bbox)
        for p, tmpl in predition_templates:
            try:
                # template.match returns a generator; pulling one item is
                # enough to know the mark is present.
                next(
                    template.match(
                        img,
                        tmpl,
                    )
                )
                yield t, p
            except StopIteration:
                # No match for this template in this column; try the next.
                continue
class AoharuBattleConfirmScene(Scene):
    """Single-mode Aoharu battle confirmation screen.

    Holds the prediction marks recognized per race type after
    recognize_predictions() has been called.
    """
    def __init__(self) -> None:
        super().__init__()
        # Filled lazily by recognize_predictions(); empty until then.
        self.predictions: Dict[constants.RaceType, constants.RacePrediction] = {}
    def to_dict(self) -> Dict[Text, Any]:
        """Serialize the scene state for logging/inspection."""
        return {
            "predictions": self.predictions,
        }
    @classmethod
    def name(cls):
        # Stable identifier used by the scene registry.
        return "single-mode-aoharu-battle-confirm"
    @classmethod
    def _enter(cls, ctx: SceneHolder) -> Scene:
        # Wait until the confirm-screen title stops changing before
        # constructing the scene, so later screenshots are stable.
        action.wait_image_stable(
            templates.SINGLE_MODE_AOHARU_BATTLE_CONFIRM_TITLE, duration=0.2
        )
        return cls()
    def recognize_predictions(self) -> None:
        """Capture a screenshot and populate self.predictions."""
        self.predictions = dict(_recognize_predictions(template.screenshot()))
| [
"NateScarlet@Gmail.com"
] | NateScarlet@Gmail.com |
4266fac216ad1d316fc296b75728ee21f701d3c9 | 509823ea14f04d5791486b56a592d7e7499d7d51 | /parte11/11.1_intro_funciones.py | ee051acf132ed8184e654c6fdfe566647698afbb | [] | no_license | Fhernd/Python-CursoV2 | 7613144cbed0410501b68bedd289a4d7fbefe291 | 1ce30162d4335945227f7cbb875f99bc5f682b98 | refs/heads/master | 2023-08-08T05:09:44.167755 | 2023-08-05T19:59:38 | 2023-08-05T19:59:38 | 239,033,656 | 64 | 38 | null | null | null | null | UTF-8 | Python | false | false | 2,212 | py | # Introducción a las funciones - Unidades de reutilización y encapsulación de información:
# 1. Creación de una función:
print('1. Creación de una función:')
def sumar(numero_1, numero_2):
    """Return the sum of two numbers (int or float).

    Parameters:
        numero_1: first addend.
        numero_2: second addend.

    Returns:
        The sum of the two values.
    """
    return numero_1 + numero_2
# Demo: call sumar() with variables, then with literals.
x = 2
y = 3
resultado = sumar(x, y)
print('El resultado de sumar {} y {} es igual a {}.'.format(x, y, resultado))
print()
# 2. Invoking a function with literal arguments:
resultado = sumar(2, 3)
print('El resultado de sumar {} y {} es igual a {}.'.format(x, y, resultado))
print()
# 3. Getting documentation/help for a function (prints the docstring):
print('3. Obtener documentación/ayuda de una función:')
print()
help(sumar)
print()
help(print)
print()
# 4. Creación de una función para alternar los valores de dos variables:
print('4. Creación de una función para intercambiar los valores de dos variables:')
# a = 2, b = 3
# a = 3, b = 2
# auxiliar = 2
# a = 3
# b = 2
def intercambiar_valores(a, b):
    """Swap two values.

    Parameters:
        a: first value.
        b: second value.

    Returns:
        A tuple with the values of a and b exchanged.
    """
    # Returning (b, a) is equivalent to swapping through a temporary.
    return b, a
# Fixed two defects in this demo section: the second value was stored in a
# variable named `b` instead of `y` (so the swap operated on the wrong
# variable), and the post-swap header in section 5 said "antes del
# intercambio" (before) instead of "después del intercambio" (after),
# matching the parallel message used earlier in section 4.
x = 2
y = 3

print('Valores de las variables `x` e `y` antes del intercambio:')
print(f'x = {x} - y = {y}')

resultado = intercambiar_valores(x, y)
x = resultado[0]
y = resultado[1]

print('Valores de las variables `x` e `y` después del intercambio:')
print(f'x = {x} - y = {y}')
print()

# 5. Using functionality the language provides out of the box:
print('5. Uso de funcionalidad que provee en su defecto (incorporado) el lenguaje de programación:')
x = 2
y = 3
resultado = x + y
print('El resultado de sumar {} y {} es igual a {}.'.format(x, y, resultado))
print()

print('Valores de las variables `x` e `y` antes del intercambio:')
print(f'x = {x} - y = {y}')

# Idiomatic tuple-unpacking swap, no temporary needed.
x, y = y, x

print('Valores de las variables `x` e `y` después del intercambio:')
print(f'x = {x} - y = {y}')
| [
"johnortizo@outlook.com"
] | johnortizo@outlook.com |
6ecc8fecb6fb0874525588a2bd17ddb89ac54107 | ce083128fa87ca86c65059893aa8882d088461f5 | /python/flask-admin-examples/layout_bootstrap3/.venv/bin/flask | ceac5d8fcd84d5ec7c11f2c2125baa483fdaae5c | [] | no_license | marcosptf/fedora | 581a446e7f81d8ae9a260eafb92814bc486ee077 | 359db63ff1fa79696b7bc803bcfa0042bff8ab44 | refs/heads/master | 2023-04-06T14:53:40.378260 | 2023-03-26T00:47:52 | 2023-03-26T00:47:52 | 26,059,824 | 6 | 5 | null | 2022-12-08T00:43:21 | 2014-11-01T18:48:56 | null | UTF-8 | Python | false | false | 287 | #!/root/NetBeansProjects/fedora/python/flask-admin-examples/layout_bootstrap3/.venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
if __name__ == '__main__':
    # Strip platform-specific launcher suffixes ("-script.pyw", ".exe")
    # from argv[0] so Flask's CLI reports a clean program name.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"marcosptf@yahoo.com.br"
] | marcosptf@yahoo.com.br | |
4b8b01a2cbf64cf1df9453d4779d4dae791226e7 | d33bb5d51c432058d2c8efa0882f24c8dad2bb4f | /setup.py | 31ab4adf42f3560f9bdf10a9780f3d01f23951c9 | [
"Apache-2.0"
] | permissive | anuragarnab/fvcore | 351e8a50fe27993646f774823f09331f62d161ae | da9d3658590c9f672998850542817acecd98facc | refs/heads/master | 2023-03-08T23:21:27.825110 | 2021-02-24T22:30:32 | 2021-02-24T22:30:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,782 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import os
from os import path
from setuptools import find_packages, setup
def get_version():
    """Read __version__ from fvcore/__init__.py next to this setup script.

    When the BUILD_NIGHTLY environment variable is "1", append a
    ".post<YYYYMMDD>" suffix and rewrite __init__.py so the nightly wheel
    carries the dated version (pip orders "1.1.post1234" >= "1.1").
    """
    init_py_path = path.join(
        path.abspath(path.dirname(__file__)), "fvcore", "__init__.py"
    )
    # Fixed: the file was opened without ever being closed; a context
    # manager releases the handle deterministically.
    with open(init_py_path, "r") as f:
        init_py = f.readlines()
    version_line = [l.strip() for l in init_py if l.startswith("__version__")][0]
    version = version_line.split("=")[-1].strip().strip("'\"")

    # Used by CI to build nightly packages. Users should never use it.
    # To build a nightly wheel, run:
    # BUILD_NIGHTLY=1 python setup.py sdist
    if os.getenv("BUILD_NIGHTLY", "0") == "1":
        from datetime import datetime

        date_str = datetime.today().strftime("%Y%m%d")
        # pip can perform proper comparison for ".post" suffix,
        # i.e., "1.1.post1234" >= "1.1"
        version = version + ".post" + date_str

        new_init_py = [l for l in init_py if not l.startswith("__version__")]
        new_init_py.append('__version__ = "{}"\n'.format(version))
        with open(init_py_path, "w") as f:
            f.write("".join(new_init_py))
    return version
# Package metadata; running this module invokes setuptools command parsing.
setup(
    name="fvcore",
    version=get_version(),
    author="FAIR",
    license="Apache 2.0",
    url="https://github.com/facebookresearch/fvcore",
    description="Collection of common code shared among different research "
    "projects in FAIR computer vision team",
    python_requires=">=3.6",
    install_requires=[
        "numpy",
        "yacs>=0.1.6",
        "pyyaml>=5.1",
        "tqdm",
        "termcolor>=1.1",
        "Pillow",
        "tabulate",
        "iopath>=0.1.2",
    ],
    # "shapely" is only needed for the optional geometry helpers.
    extras_require={"all": ["shapely"]},
    packages=find_packages(exclude=("tests",)),
)
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
900590a83b7d3240581c458f12d539d50dab438f | e3ec7260806c1e2b045a0de93a150a5c3fc1b9df | /test/ResultsAndPrizes/top-3/test_top_3_winning_numbers_of_the_last_4_draws.py | e44b5657bb3991bf27e30b9fabd015acfc7b59ef | [
"Apache-2.0"
] | permissive | FearFactor1/SPA | 58a21c9ec7a72a78f5ff50214e58faac43a3059d | a05aaa924c5bebb52cd508ebdf7fd3b81c49fac7 | refs/heads/master | 2021-07-07T04:25:12.525595 | 2020-11-16T14:35:33 | 2020-11-16T14:35:33 | 204,684,720 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 624 | py | # Топ-3 + Выигрышные номера последних 4 тиражей
def test_top_3_winning_numbers_last_4_draws(app):
    """UI flow: Top-3 game -> winning numbers of the last 4 draws report.

    Steps are order-dependent page-object calls; the assertion checks the
    report text contains the winning-numbers header (Russian UI string).
    """
    app.ResultAndPrizes.open_page_results_and_prizes()
    app.ResultAndPrizes.click_game_top_3()
    app.ResultAndPrizes.click_winning_numbers_of_the_last_4_draws()
    app.ResultAndPrizes.button_get_report_winners()
    app.ResultAndPrizes.parser_report_text_winners()
    assert "ВЫИГРЫШНЫЕ НОМЕРА" in app.ResultAndPrizes.parser_report_text_winners()
    app.ResultAndPrizes.message_id_33_top_3_winning_numbers_4_last_draw()
    app.ResultAndPrizes.comeback_main_page()
"zelezodoroznik@yandex.ru"
] | zelezodoroznik@yandex.ru |
339b0f19ded0667fb6df857d1218b8eef24f7bde | 4415f0a06536b66d4e7425b3995c4009516c180d | /World1/Challenge026.py | 0091692c182ee489a19bb63c2c350ef621f2301b | [] | no_license | AndreisSirlene/Python-Exercises-Curso-em-Video-World-1-2-and-3 | c73c2df958f5b83744af6288d26bb270aa30f8fd | 62f59383eee9b8ab43ff78495cf30eb390638013 | refs/heads/master | 2023-03-30T11:34:12.672180 | 2021-03-24T23:34:17 | 2021-03-24T23:34:17 | 290,814,841 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | phrase = str(input('Type any phrase:')).upper().strip()
print('In this phrase the letter I appears {} times'.format(phrase.count('I')))
print('The first letter I appears in the position {}'.format(phrase.find('I')+1))
print('The last position that letter I appears is {}'.format(phrase.rfind('I')+1)) | [
"andreissirlene@gmail.com"
] | andreissirlene@gmail.com |
2f2f1517049db9c6fdfb700fba9d62aacad883c3 | c9aa34a74e9f095c4dec21acf0f63ce33ea54757 | /UndocumentedScripts/CheckPnRDistanceAndTime.py | 1258464347152b538a8564eff4fc80b7360d2030 | [] | no_license | dvrpc/TIM3AnalysisScripts | d4a5308987279da421d4e9e76ca3ff6efe4e6490 | cb8d50eead676b2950051a6d527be7a13de19b9f | refs/heads/main | 2023-07-30T01:20:52.457588 | 2021-10-01T15:34:47 | 2021-10-01T15:34:47 | 372,885,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,827 | py | import pandas as pd
import numpy as np
import VisumPy.helpers as h
def log(msg):
    # 20480 is the Visum message type for informational log output.
    # `Visum` is a global injected by the PTV Visum scripting host.
    Visum.Log(20480, msg)
log('Reading Skims')
# Zone numbers, used as both row and column index for every skim matrix.
zones = np.array(h.GetMulti(Visum.Net.Zones, "No"))
def read_skim_tofrom_pnr(matno):
    """Pull Visum matrix `matno` into a DataFrame indexed by zone on both axes."""
    global zones
    skim = pd.DataFrame(h.GetMatrix(Visum, matno), zones, zones)
    return skim
# Time skims (matrices 301-305) and distance skims (401-405), keyed by the
# start of each time-of-day period (minutes formatted as HHMM strings).
skims= {'time': {'0000': read_skim_tofrom_pnr(301),
'0600': read_skim_tofrom_pnr(302),
'1000': read_skim_tofrom_pnr(303),
'1500': read_skim_tofrom_pnr(304),
'1900': read_skim_tofrom_pnr(305)},
'dist': {'0000': read_skim_tofrom_pnr(401),
'0600': read_skim_tofrom_pnr(402),
'1000': read_skim_tofrom_pnr(403),
'1500': read_skim_tofrom_pnr(404),
'1900': read_skim_tofrom_pnr(405)}}
def get_skims(args):
    """Look up (time, distance) skim values for one trip.

    args is (origin_taz, dest_taz, time_period, direction); the direction
    element is unpacked but currently unused in the lookup.
    """
    global skims
    o = args[0] #Origin
    d = args[1] #Destination
    t = args[2] #Time Period
    r = args[3] #Direction
    return (skims['time'][t].loc[o, d], skims['dist'][t].loc[o, d])
def classify_time(args):
if args[2] == 1:
t = args[0]
else:
t = args[1]
if t < 360:
return '0000'
elif t < 600:
return '0600'
elif t < 900:
return '1000'
elif t < 1140:
return '1500'
else:
return '1900'
log('Reading maz2taz correspondence')
maz2taz_file = r'D:\TIM3.1\DaySimLab\scenario\inputs\parcels_buffered.dat'
maz2taz = pd.read_csv(maz2taz_file, ' ', index_col = 0)
log('Reading Trip Data')
trip_file = r'D:\TIM3.1\DaySimLab\scenario\Output\05201612\_trip_2.dat'
trip = pd.read_csv(trip_file, '\t')
# Map parcel ids to TAZs for both trip ends.
trip['otaz'] = trip['opcl'].map(maz2taz['taz_p'])
trip['dtaz'] = trip['dpcl'].map(maz2taz['taz_p'])
log('Reading Tour Data')
tour_file = trip_file.replace('trip', 'tour')
tour = pd.read_csv(tour_file, '\t')
log('Merging and Querying')
trip = tour.merge(trip, on = ['hhno', 'pno', 'day', 'tour']) #Merge tour info to trip info
# Purpose 0 = home, 10 = park-and-ride (per the query below).
trip = trip.query('(opurp == 0 and dpurp == 10) or (opurp == 10 and dpurp == 0)') #From home to PnR or vice-versa
log('Setting Up Table')
trip['direction'] = np.where(trip['dpurp'] == 10, 'ToPnR', 'FromPnR')
trip['tod_args'] = list(zip(trip['arrtm'], trip['deptm'], trip['half']))
trip['tod'] = trip['tod_args'].apply(classify_time)
log('Getting Skim Values')
trip['skim_args'] = list(zip(trip['otaz'], trip['dtaz'], trip['tod'], trip['direction']))
trip['skims'] = trip['skim_args'].apply(get_skims)
trip['skimtime'] = trip['skims'].apply(lambda x: x[0])
trip['skimdist'] = trip['skims'].apply(lambda x: x[1])
log('Comparing')
# Differences between skimmed and simulated time/distance per trip.
trip['timediff'] = trip['skimtime'] - trip['travtime']
trip['distdiff'] = trip['skimdist'] - trip['travdist']
log('Writing')
outfile = r'D:\TIM3\PnRTripsWithVISUMSkimsReclassifyZones.csv'
trip.to_csv(outfile)
log('Done')
"jflood@dvrpc.org"
] | jflood@dvrpc.org |
aa90679fd2eaab72c0d8b81a0e0311920984962f | 7db04177060d1b1d13bdf4e0f77cda83488d08bd | /backend/auth/email.py | d6bd5a020d88428569dc96d602dbacd15c3c499c | [] | no_license | AlenAlic/mailserver | 6ed5aa32e7f97b732e3630c1c435f233851a4a6a | a37f022feb2e3a2433bf7bf15c1cef0fc364901b | refs/heads/master | 2023-02-02T09:17:00.192475 | 2019-11-08T21:12:11 | 2019-11-08T21:12:11 | 220,267,001 | 0 | 0 | null | 2023-01-09T22:33:19 | 2019-11-07T15:21:32 | CSS | UTF-8 | Python | false | false | 689 | py | from flask import render_template
from backend.email import send_email
def send_password_reset_email(user):
token = user.get_reset_password_token()
send_email('Password reset', recipients=[user.email],
text_body=render_template('email/reset_password.txt', user=user, token=token),
html_body=render_template('email/reset_password.html', user=user, token=token))
def send_activation_email(user):
    """Email *user* an account-activation link containing a signed token.

    Fixed: the original body referenced ``token`` without ever defining it,
    so every call raised NameError.  The token is generated the same way the
    password-reset mail does.
    TODO(review): confirm whether a dedicated activation-token method should
    be used instead of get_reset_password_token().
    """
    token = user.get_reset_password_token()
    send_email('Activate account', recipients=[user.email],
               text_body=render_template('email/activate_account.txt', user=user, token=token),
               html_body=render_template('email/activate_account.html', user=user, token=token))
| [
"aalic89@gmail.com"
] | aalic89@gmail.com |
8bad4dd94e501f270bd800aaa6a30a287cc857bf | f0cec246e2f30f6b4ee5656f1cb6406dd0f7879a | /thingsboard_client/models/entity_relations_query.py | 3809b31c1629f825572c4525d74a841698f3feae | [] | no_license | ascentio-tech/thingsboard-swagger-client | 4e2f7c943e243ec8505c32dab0aa3d6cf1559105 | 1e8bf7664c281c29612fd5b44261f049ca7c44fd | refs/heads/master | 2021-07-20T07:18:12.969459 | 2020-06-17T02:35:54 | 2020-06-17T02:35:54 | 184,322,192 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,072 | py | # coding: utf-8
"""
Thingsboard REST API
For instructions how to authorize requests please visit <a href='http://thingsboard.io/docs/reference/rest-api/'>REST API documentation page</a>. # noqa: E501
OpenAPI spec version: 2.0
Contact: info@thingsboard.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class EntityRelationsQuery(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> swagger type string (used by to_dict for recursion).
    swagger_types = {
        'filters': 'list[EntityTypeFilter]',
        'parameters': 'RelationsSearchParameters'
    }
    # Attribute name -> JSON key in the API payload.
    attribute_map = {
        'filters': 'filters',
        'parameters': 'parameters'
    }
    def __init__(self, filters=None, parameters=None): # noqa: E501
        """EntityRelationsQuery - a model defined in Swagger""" # noqa: E501
        self._filters = None
        self._parameters = None
        self.discriminator = None
        if filters is not None:
            self.filters = filters
        if parameters is not None:
            self.parameters = parameters
    @property
    def filters(self):
        """Gets the filters of this EntityRelationsQuery. # noqa: E501
        :return: The filters of this EntityRelationsQuery. # noqa: E501
        :rtype: list[EntityTypeFilter]
        """
        return self._filters
    @filters.setter
    def filters(self, filters):
        """Sets the filters of this EntityRelationsQuery.
        :param filters: The filters of this EntityRelationsQuery. # noqa: E501
        :type: list[EntityTypeFilter]
        """
        self._filters = filters
    @property
    def parameters(self):
        """Gets the parameters of this EntityRelationsQuery. # noqa: E501
        :return: The parameters of this EntityRelationsQuery. # noqa: E501
        :rtype: RelationsSearchParameters
        """
        return self._parameters
    @parameters.setter
    def parameters(self, parameters):
        """Sets the parameters of this EntityRelationsQuery.
        :param parameters: The parameters of this EntityRelationsQuery. # noqa: E501
        :type: RelationsSearchParameters
        """
        self._parameters = parameters
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize: lists element-wise, nested models via their
        # own to_dict, dicts value-wise; plain values pass through.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generated boilerplate: only relevant if the model subclasses dict.
        if issubclass(EntityRelationsQuery, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, EntityRelationsQuery):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"gmatheu@ascentio.com.ar"
] | gmatheu@ascentio.com.ar |
04ce739cc903974beed995d84e696a06f1ca6a8c | a3366c118bb0b899cb436d89fc76f231d63366d2 | /fc_toolbelt/tasks/project.py | 7556d39a8d8e5356d343934d48baaa0b65a33e90 | [] | no_license | futurecolors/fc-toolbelt | cd25ac1f6fc3e2414d12d8235374220944e5136e | 2f0d33f612a676bf9ff2f8e769c4d4638b5e9212 | refs/heads/master | 2021-01-10T19:55:36.062097 | 2013-10-31T10:56:51 | 2013-10-31T10:56:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,897 | py | # coding: utf-8
import os
from functools import partial
from fabric.colors import green
from fabric.context_managers import cd, prefix
from fabric.operations import run, sudo
from fabric.state import env
from fabric.tasks import Task, execute
from fabric.utils import puts
from fc_toolbelt.tasks.gitlab import BaseGitlabTask
from fc_toolbelt.tasks.mysql import create_dev_db
from .writers import write_uwsgi, write_nginx, write_project
class OpenTin(BaseGitlabTask):
    """ Commit new Django project from template into repo"""
    def run(self, project_slug):
        # Build the project skeleton in a scratch dir, then push it to the
        # GitLab repo matching project_slug.  The scratch dir is wiped
        # before and after the run.
        self.project_slug = project_slug
        tmpdir = '/tmp/fctools/'
        run('rm -rf /tmp/fctools/')
        run('mkdir -p %s' % tmpdir)
        self.create_folders_from_can(tmpdir)
        self.connect()
        repo_url = self.get_repo_url_by_path(project_slug)
        self.make_initial_commit(os.path.join(tmpdir, self.project_slug), repo_url)
        run('rm -rf /tmp/fctools/')
    def create_folders_from_can(self, dir):
        """ Clone project template, make virtualenv, custom startproject"""
        with cd(dir):
            env_name = 'canned_env'
            template_project_dirname = 'project_template'
            run('git clone %(repo)s %(dir)s' % {'repo': env.TEMPLATE_PROJECT_REPO,
                                                'dir': template_project_dirname})
            # A throwaway virtualenv is only needed to get django-admin.py
            # for startproject; the rendered project is what gets committed.
            run('virtualenv %s' % env_name)
            with prefix('source %s/bin/activate' % env_name):
                run('pip install django')
                run('django-admin.py startproject %(project)s --template=%(template)s --extension=py,gitignore' % {
                    'project': self.project_slug,
                    'template': os.path.join(template_project_dirname, env.TEMPLATE_PROJECT_PACKAGE),
                })
    def make_initial_commit(self, project_dir, repo_url):
        """ Init git repo and push it as current user """
        with cd(project_dir):
            run('git init')
            run('git config user.email "%s@fctools"' % env.user)
            run('git config user.name "%s"' % env.user)
            run('git add .')
            run('git commit -m "Initial commit via fctools"')
            run('git remote add origin %s' % repo_url)
            # Work happens on a "dev" branch; push everything to origin.
            run('git checkout -b dev')
            run('git push --all --force')
open_tin = OpenTin()
class AddDeveloper(BaseGitlabTask):
    """ Creates development project environment for developer"""
    name = 'add_developer'
    def run(self, project_slug, developer, uwsgi_config=None):
        # Full provisioning sequence: files, database, then nginx+uwsgi.
        # NOTE(review): uwsgi_config is accepted but not used here —
        # presumably consumed by a sub-task; verify before removing.
        self.project_slug = project_slug
        self.developer = developer
        self.connect()
        repo_url = self.get_repo_url_by_path(project_slug)
        self.setup_files(repo_url)
        self.setup_databases()
        self.setup_http()
        puts(green('Congrats! Now visit: %s' % ('http://%s.%s' % (project_slug, developer))))
    def setup_files(self, repo_url):
        """Create the developer's project directory and check out the repo."""
        sudo_user = partial(sudo, user=self.developer)
        sudo_user('mkdir -p %s' % env.PROJECTS_PATH_TEMPLATE % {'user': self.developer})
        puts('Setting up new project "%s" for %s' % (self.project_slug, self.developer))
        execute(write_project, project_slug=self.project_slug,
                developer=self.developer,
                repo_url=repo_url)
        puts('Created project "%s" layout for %s' % (self.project_slug, self.developer))
    def setup_databases(self):
        """Create the per-developer development database."""
        execute(create_dev_db, self.project_slug, self.developer)
        puts('Setup of dev db "%s" for %s is finished' % (self.project_slug, self.developer))
    def setup_http(self):
        """Write uwsgi and nginx configuration for the dev instance."""
        execute(write_uwsgi, self.project_slug, self.developer)
        execute(write_nginx, self.project_slug, self.developer)
        puts('Nginx+uwsgi are set up for "%s" project, developer %s' % (self.project_slug, self.developer))
add_developer = AddDeveloper()
| [
"baryshev@gmail.com"
] | baryshev@gmail.com |
5bb63b411ea64c0b78942432783bb9e0c6f28e02 | 51e7336e8bb447187cbe6ede2910f40700316dc1 | /simics/monitorCore/hapCleaner.py | 221c8265c08cba073d1765b15bf20f581efc1275 | [] | no_license | hacker-steroids/RESim | 69bac74a1b119c54d03b9ea0fda7a85cc45ea854 | 94498c699575f5078de415fac8c517d520cb2f94 | refs/heads/master | 2020-05-30T12:33:53.799610 | 2019-06-01T00:51:20 | 2019-06-01T00:51:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,388 | py | '''
Structures for cleaning up stop haps used in reverse execution
'''
class HapCleaner():
    ''' Collects (type, hap) records so the haps can be deleted together
        when cleaning up after reverse execution. '''
    hlist = None

    class HapType():
        ''' One hap record: its type string and the hap handle. '''
        def __init__(self, htype, hap):
            self.htype = htype
            self.hap = hap

    def __init__(self, cpu):
        self.hlist = []
        self.cpu = cpu

    def add(self, htype, hap):
        ''' Record a hap of the given type for later cleanup. '''
        self.hlist.append(self.HapType(htype, hap))
class StopAction():
    ''' Bundles what must happen when a stop hap fires: haps to delete,
        breakpoints to clear, and a queue of functions to execute. '''
    def __init__(self, hap_cleaner, breakpoints, flist=None, break_addrs = []):
        self.hap_clean = hap_cleaner
        self.break_addrs = break_addrs
        self.exit_addr = None
        self.breakpoints = breakpoints if breakpoints is not None else []
        self.flist = flist if flist is not None else []

    def run(self):
        ''' Pop and run the next queued function, handing it the rest of
            the queue so it can continue the chain. '''
        if self.flist:
            self.flist.pop(0).run(self.flist)

    def getBreaks(self):
        return self.break_addrs

    def setExitAddr(self, exit_addr):
        self.exit_addr = exit_addr

    def getExitAddr(self):
        return self.exit_addr

    def addFun(self, fun):
        self.flist.append(fun)
| [
"mfthomps@nps.edu"
] | mfthomps@nps.edu |
ecb0e899bf03e3016709f291990968cbabf79748 | ad054cebf4198f25d6ca9b37b0eef4783762ac04 | /Algorithm/개념정리/Memoization/Memoization_새꼼달꼼 장사.py | a40cd5afd48fe22ef6fc0fc0565a9c744e7da794 | [] | no_license | ges0531/TIL | 4888d0bde5f84ad80caac63ffecf247d22daa0bf | 54389b30e0a67f9c9a3329b1b59c43cdbb33a62c | refs/heads/master | 2023-01-10T23:51:37.409124 | 2020-08-01T07:42:23 | 2020-08-01T07:42:23 | 195,916,245 | 0 | 0 | null | 2023-01-05T01:18:07 | 2019-07-09T02:17:43 | Python | UTF-8 | Python | false | false | 1,470 | py | def max_profit_memo(price_list, count, cache):
# Base Case: 0개 혹은 1개면 부분 문제로 나눌 필요가 없기 때문에 가격을 바로 리턴한다
if count < 2:
cache[count] = price_list[count]
return price_list[count]
# 이미 계산한 값이면 cache에 저장된 값을 리턴한다
if count in cache:
return cache[count]
# profit은 count개를 팔아서 가능한 최대 수익을 저장하는 변수
# 팔려고 하는 총개수에 대한 가격이 price_list에 없으면 일단 0으로 설정
# 팔려고 하는 총개수에 대한 가격이 price_list에 있으면 일단 그 가격으로 설정
if count < len(price_list):
profit = price_list[count]
else:
profit = 0
# count개를 팔 수 있는 조합들을 비교해서, 가능한 최대 수익을 profit에 저장
for i in range(1, count // 2 + 1):
profit = max(profit, max_profit_memo(price_list, i, cache)
+ max_profit_memo(price_list, count - i, cache))
# 계산된 최대 수익을 cache에 저장
cache[count] = profit
return cache[count]
def max_profit(price_list, count):
    """Entry point: compute the best revenue for `count` items, starting
    the memoized search with a fresh, empty cache."""
    return max_profit_memo(price_list, count, {})
# Tests
print(max_profit([0, 100, 400, 800, 900, 1000], 5))
print(max_profit([0, 100, 400, 800, 900, 1000], 10))
print(max_profit([0, 100, 400, 800, 900, 1000, 1400, 1600, 2100, 2200], 9))
"dmstndlekd@gmail.com"
] | dmstndlekd@gmail.com |
2525c9a1cf70f389e645dc0637b9f0e5cb23f128 | 90bbeb45b900f1ccf57652d5439fc27be891f4c3 | /Chapter 4/tests/test_plain_text_email.py | ce514bccc4dbd2a4fc127170ca723ae5ecbb9009 | [] | no_license | RancyChepchirchir/ThoughtfulMachineLearningWithPython-py38 | cdab296c063c5764e4255105b7c627ed022fbf75 | d83959b9c21179b2e1a9d1f85428bc8d18d1fb36 | refs/heads/master | 2022-11-19T07:48:07.008197 | 2020-07-20T17:42:21 | 2020-07-20T17:42:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 736 | py | import unittest
import io
import re
from email_object import EmailObject
class TestPlaintextEmailObject(unittest.TestCase):
    """Checks EmailObject parsing of a plain-text .eml fixture."""
    # Separator between the header block and the body in the fixture.
    CLRF = "\n\n"
    def setUp(self):
        # Read the raw fixture text, then rewind and build an EmailObject
        # from the same stream.
        self.plain_file = 'fixtures/plain.eml'
        with io.open(self.plain_file, 'rb') as plaintext:
            self.text = plaintext.read().decode('utf-8')
            plaintext.seek(0)
            self.plain_email = EmailObject(plaintext)
    def test_parse_plain_body(self):
        # Everything after the first blank line is the body.
        body = self.CLRF.join(self.text.split(self.CLRF)[1:])
        self.assertEqual(self.plain_email.body(), body)
    def test_parses_the_subject(self):
        subject = re.search("Subject: (.*)", self.text).group(1)
        self.assertEqual(self.plain_email.subject(), subject)
"A.Fettouhi@gmail.com"
] | A.Fettouhi@gmail.com |
4ddd105253fe7cf2e3d05abbd6b48b0b249eb296 | 2da798f1b31c6482d8f47bce394d78ccfae9d279 | /raw_data_processing/GSE155513/SRS7124070/scripts/add_uns.py | ff439b564ac76b82fb7bfd77e135d2333a4e4386 | [] | no_license | mariafiruleva/sc_athero_itmo_master | 47378083201e0dbad327b98291bbf4e65d5d3cc5 | e3c8c1b55d61b551957da13d109c8dfb56aa3173 | refs/heads/main | 2023-05-20T11:49:23.202549 | 2021-06-07T16:26:15 | 2021-06-07T16:26:15 | 373,524,898 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,597 | py | import argparse
import re
from urllib.request import urlopen, Request
from xml.etree.ElementTree import parse
import numpy as np
import pandas as pd
import scanpy
def get_markers(markers_file: str) -> dict:
    """Load a tab-separated marker table as {column name: numpy array of values}."""
    table = pd.read_csv(markers_file, sep='\t')
    return {column: np.array(list(values.values()))
            for column, values in table.to_dict().items()}


path = "/mnt/tank/scratch/mfiruleva/scn/data/GSE155513/SRS7124070"
def add_uns(h5: str, h5_out: str, s_d: str, summary_file: str, kallisto_script=None, technology=None) -> None:
    """Fill the AnnData ``.uns`` slot with study/sample metadata and markers.

    Reads the AnnData from ``h5``, merges sample metadata from the CSV
    ``s_d``, scrapes the study title/abstract from GEO/SRA over HTTP,
    appends a row to ``summary_file``, attaches per-resolution marker
    tables, and writes the result to ``h5_out``.

    Fixed: the 'PRJNA' entry of ``acc_ids`` previously reused the SRP
    regex, so the PRJNA fallback below could never take effect; the [0]
    lookups are now guarded so a page without an SRP accession falls
    through to PRJNA instead of raising IndexError.
    """
    file = scanpy.read_h5ad(h5)
    description = pd.read_csv(s_d).reset_index().to_dict("records")[0]
    file.uns["expType"] = "counts"
    file.uns["public"] = True
    file.uns["curated"] = False
    file.uns["gse"] = description['GSE']
    file.uns["geo"] = f"https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc={description['GSE']}"
    file.uns["study_accession"] = description['study_accession']
    file.uns["species"] = description['scientific_name']
    if isinstance(file.uns["species"], list):
        file.uns["species"] = file.uns["species"][0]
    if technology:
        file.uns['technology'] = technology
    else:
        if description['technology'] != "10x":
            file.uns["technology"] = description['technology']
        else:
            # Recover the exact 10x chemistry (e.g. "10xv2") from the
            # kallisto invocation script.
            with open(kallisto_script, 'r') as run_file:
                data = run_file.read().replace('\n', '')
            file.uns["technology"] = re.findall(r'10xv[0-9]*', data)[0]
    # Follow the GEO redirect, then fetch the final page to mine accessions.
    link = Request(f'https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc={description["GSE"]}',
                   headers={'User-Agent': 'Mozilla/5.0'})
    link = urlopen(link)
    article = Request(link.url, headers={'User-Agent': 'Mozilla/5.0'})
    response = urlopen(article).read()
    decoded = response.decode('utf-8')
    srp_matches = re.findall(r'SRP\d*', decoded)
    prjna_matches = re.findall(r'PRJNA\d*', decoded)
    acc_ids = {'SRP': srp_matches[0] if srp_matches else '',
               'PRJNA': prjna_matches[0] if prjna_matches else ''
               }
    if acc_ids['SRP']:
        var_url = urlopen(
            f'http://trace.ncbi.nlm.nih.gov/Traces/sra/sra.cgi?save=efetch&db=sra&rettype=runtable&term={acc_ids["SRP"]}')
    else:
        var_url = urlopen(
            f'http://trace.ncbi.nlm.nih.gov/Traces/sra/sra.cgi?save=efetch&db=sra&rettype=runtable&term={acc_ids["PRJNA"]}')
    xmldoc = parse(var_url)
    file.uns["title"] = xmldoc.findall('EXPERIMENT_PACKAGE/STUDY/DESCRIPTOR/STUDY_TITLE')[0].text
    study_des = xmldoc.findall('EXPERIMENT_PACKAGE/STUDY/DESCRIPTOR/STUDY_ABSTRACT')[0].text
    file.uns["description"] = re.sub('Overall design:\s*', '', study_des)
    # NOTE(review): this assumes the abstract contains an "Overall design:"
    # section; it raises IndexError otherwise (unchanged behavior).
    file.uns["design"] = re.sub('Overall design:\s*', '', re.findall('Overall design:.*', study_des)[0])
    file.uns["token"] = description['secondary_sample_accession']
    file.uns["sra"] = f"https://www.ncbi.nlm.nih.gov/sra/{description['secondary_sample_accession']}"
    file.uns['processed_from_panglao'] = False
    # Append one summary row describing this sample.
    meta = {'dataset': file.uns['gse'], 'sample': file.uns['token'], 'organism': file.uns['species'],
            'technology': file.uns['technology'], 'path': path}
    pd.DataFrame.from_dict(meta, orient='index').T.to_csv(summary_file, mode='a', header=False, index=False)
    # Attach marker tables for each clustering resolution.
    file.uns['markers'] = dict()
    resolutions = re.sub('\s', '', "0.2, 0.4, 0.6, 0.8, 1").split(',')
    for res in resolutions:
        file.uns['markers'][f'markers{res}'] = get_markers(f'markers/SCT_snn_res.{res}/markers.tsv')
    file.write_h5ad(h5_out, compression='gzip')
if __name__ == '__main__':
    # CLI wrapper: parse arguments and delegate all work to add_uns().
    parser = argparse.ArgumentParser(description="Define 10x version")
    parser.add_argument('--h5', type=str, required=True,
                        help='h5 input filename without uns after Seurat processing')
    parser.add_argument('--h5_out', type=str, required=True,
                        help='h5 output filename with filled uns')
    parser.add_argument('--kallisto_script', type=str, required=False, default=None,
                        help='Path to kallisto script')
    parser.add_argument('--s_d', type=str, required=True,
                        help='Path to sample description file')
    parser.add_argument('--summary_file', type=str, required=True,
                        help='Path to the summary file')
    parser.add_argument('--technology', type=str, required=False, default=None,
                        help='Name of used technology; this argument specified in case of panglao db')
    args = parser.parse_args()
    add_uns(h5=args.h5, h5_out=args.h5_out, kallisto_script=args.kallisto_script,
            s_d=args.s_d, summary_file=args.summary_file, technology=args.technology)
"mmfiruleva@gmail.com"
] | mmfiruleva@gmail.com |
4bfff399a77f0d64ddfa5d94b522b8f764483c1e | 95ef92ea4cafc8865268f38a7cb03dc078f39846 | /content/migrations/0001_initial.py | 5a939b52afa3ff5b4b53b3217f9a31501ee95989 | [
"MIT"
] | permissive | enterstudio/digihel | ec8ea7d714897c8b041f91ff3d0e89e2e9ec364a | b0f99a0be768df3b3a0cae20fe29a4018cd67ef7 | refs/heads/master | 2022-05-25T17:42:29.419062 | 2017-04-24T08:55:08 | 2017-04-24T08:55:08 | 89,243,603 | 1 | 0 | MIT | 2022-05-17T02:21:20 | 2017-04-24T13:30:06 | CSS | UTF-8 | Python | false | false | 1,124 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-05 11:57
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import wagtail.wagtailcore.blocks
import wagtail.wagtailcore.fields
import wagtail.wagtailimages.blocks
class Migration(migrations.Migration):
initial = True
dependencies = [
('wagtailcore', '0028_merge'),
]
operations = [
migrations.CreateModel(
name='ContentPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('body', wagtail.wagtailcore.fields.StreamField((('heading', wagtail.wagtailcore.blocks.CharBlock(classname='full title')), ('paragraph', wagtail.wagtailcore.blocks.RichTextBlock()), ('image', wagtail.wagtailimages.blocks.ImageChooserBlock())))),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
]
| [
"juha.yrjola@iki.fi"
] | juha.yrjola@iki.fi |
d70d778641730cf8a69624e5b36c83a7e9a896d1 | c2a46158a91d3dd41e962230d182c80bfc88886e | /test/test_documents_builddoc_model.py | 38d2e7aeecf21fdb98dcccb36ce2c9e117678aaf | [] | no_license | martinsauve/doli-swagger-client-python | e5f4308b6a38c34c4c0bcc796f6863e983b6d7da | b2606e6f9f3064fe55f81ab90ec524921086a159 | refs/heads/master | 2023-08-14T00:04:02.228383 | 2021-10-06T15:34:02 | 2021-10-06T15:34:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 950 | py | # coding: utf-8
"""
Restler API Explorer
Live API Documentation # noqa: E501
OpenAPI spec version: 1
Contact: arul@luracast.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.documents_builddoc_model import DocumentsBuilddocModel # noqa: E501
from swagger_client.rest import ApiException
class TestDocumentsBuilddocModel(unittest.TestCase):
"""DocumentsBuilddocModel unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testDocumentsBuilddocModel(self):
"""Test DocumentsBuilddocModel"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.documents_builddoc_model.DocumentsBuilddocModel() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"martin@billesandco.paris"
] | martin@billesandco.paris |
b8377b2e248859ff49ac04507a18cd900f528562 | d8edd97f8f8dea3f9f02da6c40d331682bb43113 | /networks1147.py | 4f426f1b851598c40bd8914d2ca5ee2a706e642e | [] | no_license | mdubouch/noise-gan | bdd5b2fff3aff70d5f464150443d51c2192eeafd | 639859ec4a2aa809d17eb6998a5a7d217559888a | refs/heads/master | 2023-07-15T09:37:57.631656 | 2021-08-27T11:02:45 | 2021-08-27T11:02:45 | 284,072,311 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,187 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
__version__ = 205
# Number of continuous features (E, t, dca)
n_features = 3
geom_dim = 2
class Gen(nn.Module):
def __init__(self, ngf, latent_dims, seq_len, encoded_dim, n_wires):
super().__init__()
self.latent_dims = latent_dims
self.ngf = ngf
self.seq_len = seq_len
self.version = __version__
# Input: (B, latent_dims, 1)
self.act = nn.ReLU()
self.dropout = nn.Dropout(0.1)
n512 = 128
self.n512 = n512
n256 = n512 // 2
n128 = n512 // 4
n64 = n512 // 8
n32 = n512 // 16
n16 = n512 // 32
class Simple(nn.Module):
def __init__(self, in_c, out_c, *args, **kwargs):
super().__init__()
self.conv = nn.ConvTranspose1d(in_c, out_c, *args, **kwargs)
self.norm = nn.BatchNorm1d(out_c)
self.act = nn.ReLU()
def forward(self, x):
return self.act(self.norm(self.conv(x)))
class Res(nn.Module):
def __init__(self, in_c, out_c, k_s, stride, *args, **kwargs):
super().__init__()
self.s1 = Simple(in_c, out_c, k_s, stride, *args, **kwargs)
self.s2 = Simple(out_c, out_c, 3, 1, 1)
self.s3 = Simple(out_c, out_c, 3, 1, 1)
self.conv4 = nn.ConvTranspose1d(out_c, out_c, 3, 1, 1)
self.norm4 = nn.BatchNorm1d(out_c)
if in_c != out_c:
self.convp = nn.ConvTranspose1d(in_c, out_c, 1, 1, 0)
else:
self.convp = nn.Identity()
self.interp = nn.Upsample(scale_factor=stride, mode='linear')
self.act = nn.ReLU()
def forward(self, x):
y0 = self.convp(self.interp(x))
y = self.s1(x)
y = self.s2(y)
y = self.s3(y)
y = self.act(self.norm4(y0 + self.conv4(y)))
return y
self.lin0 = nn.Linear(latent_dims, 128 * 128)
self.s1 = Res(128, 128, 3, 2, 1, output_padding=1)
self.s2 = Res(128, 128, 3, 2, 1, output_padding=1)
self.s3 = Res(128, 128, 3, 2, 1, output_padding=1)
self.s4 = Res(128, 64, 3, 2, 1, output_padding=1)
self.convp = nn.ConvTranspose1d(64, n_features, 1, 1, 0, bias=True)
self.convw = nn.ConvTranspose1d(64, n_wires, 1, 1, 0, bias=True)
self.out = nn.Tanh()
self.max_its = 3000
self.temp_min = 1.0
self.gen_it = 3000
def forward(self, z):
#print('latent space %.2e %.2e' % (z.mean().item(), z.std().item()))
# z: random point in latent space
x = self.act(self.lin0(z).reshape(-1, 128, 128))
x = self.s1(x)
x = self.s2(x)
x = self.s3(x)
x = self.s4(x)
w = self.convw(x)
tau = 1. / ((1./self.temp_min)**(self.gen_it / self.max_its))
wg = F.gumbel_softmax(w, dim=1, hard=True, tau=tau)
p = self.convp(x)
return self.out(p), wg
class Disc(nn.Module):
def __init__(self, ndf, seq_len, encoded_dim, n_wires):
super().__init__()
self.version = __version__
# (B, n_features, 256)
self.act = nn.LeakyReLU(0.2)
n512 = 512
n256 = n512//2
n128 = n512//4
n64 = n512//8
n32 = n512//16
nproj = 4
class Simple(nn.Module):
def __init__(self, in_c, out_c, *args, **kwargs):
super().__init__()
self.conv = nn.Conv1d(in_c, out_c, *args, **kwargs)
self.act = nn.LeakyReLU(0.2)
def forward(self, x):
return self.act(self.conv(x))
class Res(nn.Module):
def __init__(self, in_c, out_c, k_s, stride, *args, **kwargs):
super().__init__()
self.s1 = Simple(in_c, in_c, 3, 1, 1)
self.s2 = Simple(in_c, in_c, 3, 1, 1)
self.s3 = Simple(in_c, in_c, 3, 1, 1)
self.conv4 = nn.Conv1d(in_c, out_c, k_s, stride, *args, **kwargs)
self.act = nn.LeakyReLU(0.2)
if in_c != out_c:
self.convp = nn.Conv1d(in_c, out_c, 1, 1, 0)
else:
self.convp = nn.Identity()
self.interp = nn.AvgPool1d(stride)
def forward(self, x):
y0 = self.convp(self.interp(x))
y = self.s1(x)
y = self.s2(y)
y = self.s3(y)
y = self.act(y0 + self.conv4(y))
return y
self.convw0 = nn.Conv1d(n_wires, nproj, 1, 1, 0, bias=False)
self.s1 = Res(1+n_features+nproj+geom_dim, 64, 3, 1, 1, padding_mode='circular')
self.s2 = Res(64, 128, 3, 2, 1)
self.s3 = Res(128, 128, 3, 2, 1)
self.s4 = Res(128, 128, 3, 2, 1)
self.lin0 = nn.Linear(128, 1)
self.out = nn.Identity()
def forward(self, x_, xy_, w_):
# x_ is concatenated tensor of p_ and w_, shape (batch, features+n_wires, seq_len)
# p_ shape is (batch, features, seq_len),
# w_ is AE-encoded wire (batch, encoded_dim, seq_len)
seq_len = x_.shape[2]
#dist = ((xy - nn.ConstantPad1d((1, 0), 0.0)(xy[:,:,:-1]))**2).sum(dim=1).unsqueeze(1)
p = x_
#xy = x[:,n_features:n_features+geom_dim]
wg = w_
#xy = torch.tensordot(wg, wire_sphere+torch.randn_like(wire_sphere) * 0.01, dims=[[1], [1]]).permute(0,2,1)
xy = xy_
occupancy = wg.sum(dim=2).var(dim=1).unsqueeze(1).unsqueeze(2)
print(occupancy.mean().item())
w0 = self.convw0(wg)
x = torch.cat([w0, xy, p, occupancy.expand(-1, 1, seq_len)], dim=1)
x = self.s1(x)
x = self.s2(x)
x = self.s3(x)
x = self.s4(x)
x = self.lin0(x.mean(2)).squeeze()
return self.out(x)
def get_n_params(model):
return sum(p.reshape(-1).shape[0] for p in model.parameters())
| [
"m.dubouchet18@imperial.ac.uk"
] | m.dubouchet18@imperial.ac.uk |
6b52b6ec4e126f452e972de1e72f08164c5c6e7a | 971e0efcc68b8f7cfb1040c38008426f7bcf9d2e | /tests/artificial/transf_RelativeDifference/trend_ConstantTrend/cycle_30/ar_12/test_artificial_1024_RelativeDifference_ConstantTrend_30_12_0.py | d0ff3e622857b909a929bdfb980a69ece6beb1b9 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | antoinecarme/pyaf | a105d172c2e7544f8d580d75f28b751351dd83b6 | b12db77cb3fa9292e774b2b33db8ce732647c35e | refs/heads/master | 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 | BSD-3-Clause | 2023-03-08T21:45:40 | 2016-10-13T09:30:30 | Python | UTF-8 | Python | false | false | 279 | py | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "ConstantTrend", cycle_length = 30, transform = "RelativeDifference", sigma = 0.0, exog_count = 0, ar_order = 12); | [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
39f835403646b080d5decc274d92fe1f6d778d3e | 9cedfd5b519f2ef7608313b5b0f1b361aeac8cc6 | /resources/onelinelog.py | c9de6b2a8eea995b7052359e31ab3c538d2b9536 | [] | no_license | fdev31/archx | 50a5bb6e8525ef87048345e40928a05e735cb758 | 5345bee7cc1a9cb0b98490749d9bbb5969b9a8f9 | refs/heads/master | 2020-05-21T03:08:18.532179 | 2019-12-16T21:23:25 | 2019-12-16T21:23:25 | 50,664,593 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,126 | py | #!/usr/bin/env python
import sys
import codecs
import shutil
import fileinput
import time
PROMPT='> '
DEBUG=0
def univ_file_read(name, mode):
# WARNING: ignores mode argument passed to this function
return open(name, 'rU')
linelen=0
twidth = shutil.get_terminal_size()[0]
logfile=codecs.open('stdout.log', 'w+', encoding='utf-8')
for line in fileinput.input(openhook=univ_file_read):
if DEBUG:
sys.stdout.write(line)
continue
if linelen:
try:
sys.stdout.write(' '*linelen+'\r')
except Exception as e:
print(e)
try:
logfile.write(line)
except Exception as e:
print(e)
line = line.strip().replace('\n', '_')
if not line:
continue
if 'Dload' in line:
line = 'Downloading...'
elif 'db.lck.' in line:
print('DATA BASE LOCKED, rm '+(line.rsplit(' ', 1)[-1][:-1]))
time.sleep(5)
if len(line)+1 > twidth :
line = PROMPT + line[:twidth-10] + '...\r'
else:
line = PROMPT + line + '\r'
sys.stdout.write(line)
sys.stdout.flush()
linelen = len(line) + 1
| [
"fdev31@gmail.com"
] | fdev31@gmail.com |
79ffe575c9f5608527ae2bbd1b2642208f7b21da | af1e8fd6bf305cce661262a4289df74ab886425e | /Troubleshooting and Debugging Techniques/examples/binary_search.py | c173182fb2fc1aba41f2418902e078a744676067 | [] | no_license | Nahid-Hassan/online-learning | 95bf80f205ed33b5071da63c1939baa5e08f13d5 | aee087bc42cba60ef2c3129fb8e96f68de1f44b4 | refs/heads/main | 2023-02-25T23:11:58.008425 | 2021-02-02T09:37:34 | 2021-02-02T09:37:34 | 306,249,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 605 | py | def binary_search(list, key):
"""Returns the position of key in the list if found, -1 otherwise.
List must be sorted.
"""
left = 0
right = len(list) - 1
while left <= right:
middle = (left + right) // 2
if list[middle] == key:
return middle
if list[middle] > key:
right = middle - 1
if list[middle] < key:
left = middle + 1
return -1
name_list = ['nahid', 'hassan', 'mony', 'mahin', 'meem', 'bristy']
name_list.sort()
print(name_list)
idx = binary_search(name_list, 'meem')
print(idx, name_list[idx])
| [
"nahid.cseru@gmail.com"
] | nahid.cseru@gmail.com |
17fd2ad5ff1c32c21c6b8e6b7ec8a3bac988e23c | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /NMHFTCMqW6j8sXkNd_21.py | c5fbf2a525560148a2a09327ec38d12348248a53 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 502 | py | """
Create a function that takes a list of two numbers and checks if the **square
root** of the first number is equal to the **cube root** of the second number.
### Examples
check_square_and_cube([4, 8]) ➞ True
check_square_and_cube([16, 48]) ➞ False
check_square_and_cube([9, 27]) ➞ True
### Notes
* Remember to return either `True` or `False`.
* All lists contain _two positive numbers_.
"""
def check_square_and_cube(lst):
return lst[1] == lst[0] ** 1.5
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
f3cdc9b4d2cdd55e81a5b35f33ac217dd6cc97a4 | 58a0ba5ee99ec7a0bba36748ba96a557eb798023 | /Olympiad Solutions/URI/1059.py | a9a1b307c1fe96c5d3f1bca45a294bc8510db5d7 | [
"MIT"
] | permissive | adityanjr/code-DS-ALGO | 5bdd503fb5f70d459c8e9b8e58690f9da159dd53 | 1c104c33d2f56fe671d586b702528a559925f875 | refs/heads/master | 2022-10-22T21:22:09.640237 | 2022-10-18T15:38:46 | 2022-10-18T15:38:46 | 217,567,198 | 40 | 54 | MIT | 2022-10-18T15:38:47 | 2019-10-25T15:50:28 | C++ | UTF-8 | Python | false | false | 240 | py | # Ivan Carvalho
# Solution to https://www.urionlinejudge.com.br/judge/problems/view/1059
# -*- coding: utf-8 -*-
'''
Escreva a sua solução aqui
Code your solution here
Escriba su solución aquí
'''
for i in xrange(2,101,2):
print i
| [
"samant04aditya@gmail.com"
] | samant04aditya@gmail.com |
7b1074e18796d2cef670d1ffbc15893dee538894 | fc2d2163e790741de0c0e1aa337948cfeb5b6ba9 | /tests/syntax/UnpackTwoStars32.py | 3a8683cbd4fd3072ce5acf7dcabd04fe814e5bdc | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | nmoehrle/Nuitka | bcd20531f150ada82c8414620dca6c5424be64d1 | 317d1e4e49ef8b3bdfe2f80f2464040d644588b2 | refs/heads/master | 2023-06-22T09:56:23.604822 | 2017-11-29T14:10:01 | 2017-11-29T14:10:01 | 122,110,166 | 0 | 0 | Apache-2.0 | 2018-02-19T19:29:05 | 2018-02-19T19:29:05 | null | UTF-8 | Python | false | false | 809 | py | # Copyright 2017, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Python tests originally created or extracted from other peoples work. The
# parts were too small to be protected.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
foo, *bar, baz, *a, b = name.split('/')
| [
"kay.hayen@gmail.com"
] | kay.hayen@gmail.com |
8d417242c48459b312ed486523acbd941413aea9 | 4ef688b93866285bcc27e36add76dc8d4a968387 | /tests/test_kms/test_model.py | 523b1d748ffd9690156bc05d2f272747a9687192 | [
"Apache-2.0"
] | permissive | localstack/moto | cec77352df216cac99d5e0a82d7ada933950a0e6 | b0b2947e98e05d913d7ee2a0379c1bec73f7d0ff | refs/heads/localstack | 2023-09-01T05:18:16.680470 | 2023-07-10T09:00:26 | 2023-08-07T14:10:06 | 118,838,444 | 22 | 42 | Apache-2.0 | 2023-09-07T02:07:17 | 2018-01-25T00:10:03 | Python | UTF-8 | Python | false | false | 1,183 | py | import pytest
from moto.kms.models import KmsBackend
PLAINTEXT = b"text"
REGION = "us-east-1"
@pytest.fixture(name="backend")
def fixture_backend():
return KmsBackend(REGION)
@pytest.fixture(name="key")
def fixture_key(backend):
return backend.create_key(
None, "ENCRYPT_DECRYPT", "SYMMETRIC_DEFAULT", "Test key", None
)
def test_encrypt_key_id(backend, key):
ciphertext, arn = backend.encrypt(key.id, PLAINTEXT, {})
assert ciphertext is not None
assert arn == key.arn
def test_encrypt_key_arn(backend, key):
ciphertext, arn = backend.encrypt(key.arn, PLAINTEXT, {})
assert ciphertext is not None
assert arn == key.arn
def test_encrypt_alias_name(backend, key):
backend.add_alias(key.id, "alias/test/test")
ciphertext, arn = backend.encrypt("alias/test/test", PLAINTEXT, {})
assert ciphertext is not None
assert arn == key.arn
def test_encrypt_alias_arn(backend, key):
backend.add_alias(key.id, "alias/test/test")
ciphertext, arn = backend.encrypt(
f"arn:aws:kms:{REGION}:{key.account_id}:alias/test/test", PLAINTEXT, {}
)
assert ciphertext is not None
assert arn == key.arn
| [
"noreply@github.com"
] | localstack.noreply@github.com |
feccd33264447c92abf14e4458ead9f06a2faa3f | d4f1bd5e52fe8d85d3d0263ede936928d5811bff | /Python/Problem Solving/ETC_algorithm_problem/5-11-2 max heap.py | d2ff607ea6054165b71bd00604e33c7b4fd62e91 | [] | no_license | ambosing/PlayGround | 37f7d071c4402599995a50cac1e7f1a85c6d10dd | 0d5262dbb2fa2128ecb3fd969244fa647b104928 | refs/heads/master | 2023-04-08T04:53:31.747838 | 2023-03-23T06:32:47 | 2023-03-23T06:32:47 | 143,112,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 181 | py | import heapq
h = []
while True:
n = int(input())
if n == -1:
break
elif n == 0:
print(heapq.heappop(h)[1])
else:
heapq.heappush(h, (-n, n))
| [
"ambosing_@naver.com"
] | ambosing_@naver.com |
d38db2fa69e1cec7a307c36300e70bf1c784d05c | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p04043/s722084447.py | 63049b4fc35548c7e3243a14c504967fc3f86476 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | A, B, C = map(int, input().split())
num = [0]*11
num[A] += 1
num[B] += 1
num[C] += 1
ans = 'YES' if num[5] == 2 and num[7] == 1 else 'NO'
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
9f0b2d91751a39fb0928cb1695e4ef33be1ad02d | d260f1492f1d3cffb72bd4e8c67da7b0724fa5d5 | /kubeflow/fairing/preprocessors/full_notebook.py | 84ff8f31a9bf417bb912c022cc61bddfa05ca6e0 | [
"Apache-2.0"
] | permissive | wyw64962771/fairing | 3be92ab22d596a360c6f8d70f678b3ada265e649 | 0cc639870ea3f773c5ae8a53c0ab16d4cda2ea6c | refs/heads/master | 2020-08-19T13:28:24.778189 | 2019-10-17T12:08:39 | 2019-10-17T12:08:39 | 215,924,578 | 1 | 0 | Apache-2.0 | 2019-10-18T02:23:58 | 2019-10-18T02:23:58 | null | UTF-8 | Python | false | false | 2,380 | py | import os
from kubeflow.fairing.preprocessors.base import BasePreProcessor
from kubeflow.fairing.constants import constants
from kubeflow.fairing.notebook import notebook_util
class FullNotebookPreProcessor(BasePreProcessor):
""" The Full notebook preprocess for the context which comes from BasePreProcessor.
:param BasePreProcessor: a context that gets sent to the builder for the docker build and
sets the entrypoint
"""
# TODO: Allow configuration of errors / timeout options
def __init__(self,
notebook_file=None,
output_file="fairing_output_notebook.ipynb",
input_files=None,
command=None,
path_prefix=constants.DEFAULT_DEST_PREFIX,
output_map=None):
""" Init the full notebook preprocess.
:param notebook_file: the jupyter notebook file.
:param output_file: the output file, the defaut name is 'fairing_output_notebook.ipynb'.
:param input_files: the source files to be processed.
:param command: the command to pass to the builder.
:param path_prefix: the defaut destion path prefix '/app/'.
:param output_map: a dict of files to be added without preprocessing.
"""
if notebook_file is None and notebook_util.is_in_notebook():
notebook_file = notebook_util.get_notebook_name()
if notebook_file is None:
raise ValueError('A notebook_file must be provided.')
relative_notebook_file = notebook_file
# Convert absolute notebook path to relative path
if os.path.isabs(notebook_file[0]):
relative_notebook_file = os.path.relpath(notebook_file)
if command is None:
command = ["papermill", relative_notebook_file, output_file, "--log-output"]
input_files = input_files or []
if relative_notebook_file not in input_files:
input_files.append(relative_notebook_file)
super().__init__(
executable=None,
input_files=input_files,
command=command,
output_map=output_map,
path_prefix=path_prefix)
def set_default_executable(self):
""" Ingore the default executable setting for the full_notebook preprocessor.
"""
pass
| [
"k8s-ci-robot@users.noreply.github.com"
] | k8s-ci-robot@users.noreply.github.com |
50d1f8e859d7710b2a71797166f82bbf97dcfb1f | df1ed60ce7d95a31565c5963ccda404d16b780ba | /src/h02_learn/dataset/dep_label.py | 2667a9d3378c4777ae28ec7222d485504c635aef | [
"MIT"
] | permissive | imperialite/info-theoretic-probing | 471a3c726e8b4e433ae8acaa070fbd964c6640a1 | 70414d5466e8c372187730c018064dd9309dd09a | refs/heads/master | 2022-04-23T00:53:23.283886 | 2020-04-27T16:19:07 | 2020-04-27T16:19:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,315 | py | import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
import torch
from torch.utils.data import Dataset
from h01_data.process import get_data_file_base as get_file_names
from util import constants
from util import util
from .pos_tag import PosTagDataset
class DepLabelDataset(PosTagDataset):
# pylint: disable=too-many-instance-attributes
def load_data_index(self):
data_ud = util.read_data(self.input_name_base % (self.mode, 'ud'))
x_raw, y_raw = [], []
for sentence_ud, words in data_ud:
for i, token in enumerate(sentence_ud):
head = token['head']
rel = token['rel']
if rel == "_" or rel == "root":
continue
x_raw_tail = words[i]
x_raw_head = words[head - 1]
x_raw += [[x_raw_tail, x_raw_head]]
y_raw += [rel]
x_raw = np.array(x_raw)
y_raw = np.array(y_raw)
return x_raw, y_raw
def load_index(self, x_raw, words=None):
if words is None:
words = []
new_words = sorted(list(set(np.unique(x_raw)) - set(words)))
if new_words:
words = np.concatenate([words, new_words])
words_dict = {word: i for i, word in enumerate(words)}
x = np.array([[words_dict[token] for token in tokens] for tokens in x_raw])
self.x = torch.from_numpy(x)
self.words = words
self.n_words = len(words)
def load_data(self):
data_ud = util.read_data(self.input_name_base % (self.mode, 'ud'))
data_embeddings = util.read_data(self.input_name_base % (self.mode, self.representation))
x_raw, y_raw = [], []
for (sentence_ud, words), (sentence_emb, _) in zip(data_ud, data_embeddings):
for i, token in enumerate(sentence_ud):
head = token['head']
rel = token['rel']
if rel == "_" or rel == "root":
continue
x_raw_tail = sentence_emb[i]
x_raw_head = sentence_emb[head - 1]
x_raw += [np.concatenate([x_raw_tail, x_raw_head])]
y_raw += [rel]
x_raw = np.array(x_raw)
y_raw = np.array(y_raw)
return x_raw, y_raw
| [
"tiagopms@gmail.com"
] | tiagopms@gmail.com |
806c4c878888711dca3ec79b8fe335bae9900008 | 430b9e03e36e355bba475df49505011f99fa0819 | /web/第4课:页面交互操作/d5_鼠标操作.py | 7d4eb25bce43899556d622d03e2e63d1e93a663c | [] | no_license | gaoyang1224/mysite | b43e5d5e378b810b94dd60ffcac1c992173cc11a | 72150c67b9590b0498241a1eacb2669a836520ff | refs/heads/master | 2023-05-01T21:42:40.096287 | 2021-05-20T14:40:30 | 2021-05-20T14:40:30 | 368,254,604 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | import time
from selenium import webdriver
from selenium.webdriver import ActionChains
driver = webdriver.Firefox()
driver.implicitly_wait(4)
driver.get('file:///D:/classes/web_auto_testing/%E7%AC%AC4%E8%AF%BE%EF%BC%9A%E9%A1%B5%E9%9D%A2%E4%BA%A4%E4%BA%92%E6%93%8D%E4%BD%9C/alert_demo.html')
# 复杂版:
# 初始化 ActionChains: 动作链条,
action = ActionChains(driver)
# 定位一个元素
h2 = driver.find_element('xpath', '//h2')
# click 操作
action.click(h2).perform()
time.sleep(5)
# 简单
# h2 = driver.find_element('xpath', '//h2')
# h2.click()
| [
"15195989321@163.com"
] | 15195989321@163.com |
a4da331d66a9bc9ab226ec4306a45994e44a8df7 | 3e59c64c78aa3ffc4ca6ee358ee1a3ba61e2d4af | /energy/activation.py | 596fbb09332eba316b94d644fc50b0773c482779 | [
"MIT"
] | permissive | pminervini/DeepKGC | de35f75fac9c64ca6e09e4ab244552792669678d | ed55d0a28d7607324def7c48ebde98786c11d5e1 | refs/heads/master | 2016-09-06T02:36:47.748324 | 2015-07-06T12:35:07 | 2015-07-06T12:35:07 | 38,617,255 | 5 | 5 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | # -*- coding: utf-8 -*-
import theano.tensor as T
# Activation functions
def htanh(x):
return -1. * (x < -1.) + x * (x < 1.) * (x >= -1.) + 1. * (x >= 1)
def hsigm(x):
return x * (x < 1) * (x > 0) + 1. * (x >= 1)
def rect(x):
return x * (x > 0)
def sigm(x):
return T.nnet.sigmoid(x)
def tanh(x):
return T.tanh(x)
def lin(x):
return x
| [
"p.minervini@gmail.com"
] | p.minervini@gmail.com |
d42e178adedceb3d83a4176e7940c42721a0994f | a2b23a8ab40a01903438b22cf964704ad90ea414 | /0x0A-python-inheritance/10-square.py | 945e59b894388c2c68e3a4beff404c0670f4ff3b | [] | no_license | Katorea132/higher_level_programming | b78809d5d2a052c1e9680d24cc547d12ac69c41e | 746f094c10fed8c2497b65c7a18c782e1b7cd3a9 | refs/heads/master | 2022-12-17T04:39:57.794263 | 2020-09-24T19:30:57 | 2020-09-24T19:30:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 656 | py | #!/usr/bin/python3
"""THis module is for squares
"""
Rekt = __import__("9-rectangle").Rectangle
class Square(Rekt):
"""The square class from the super class rectangle from
the super class geometry
Args:
Rekt (class): super class
"""
def __init__(self, size):
"""Initializer
Args:
size (integer): The size of a side of the square
"""
self.integer_validator("size", size)
self.__size = size
super().__init__(size, size)
def area(self):
"""Returns the area
Returns:
int: The area
"""
return self.__size * self.__size
| [
"katorea132@gmail.com"
] | katorea132@gmail.com |
3fcee134c03e33b7dcf94b71921e4a066cf3c566 | 105ef2d5f8bba13c15deb8c4a2a9af307b4e547a | /Baekjoon/python/11053.py | 212a1537f73cd5c4d20725f1cd212c45c8474320 | [] | no_license | caniro/algo-note | 1ec4c0e08adcb542d3356daf7b6e943af722394f | d237a5b58a67ca453dc7a1a335f99428af2c5df5 | refs/heads/master | 2023-08-29T22:39:35.189711 | 2021-11-04T11:18:07 | 2021-11-04T11:18:07 | 260,473,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 971 | py | # 가장 긴 증가하는 부분 수열 : https://www.acmicpc.net/problem/11053
from sys import stdin
input = stdin.readline
INF = 1e9
def lis(arr):
if not arr:
return 0
c = [-INF] + [INF] * len(arr)
c[1] = arr[0]
max_length = 1
def search(low, high, value):
if low == high:
return low
elif low + 1 == high:
return high if value > c[low] else low
mid = (low + high) // 2
if c[mid] == value:
return mid
elif c[mid] < value:
return search(mid + 1, high, value)
else:
return search(low, mid, value)
for num in arr[1:]:
if num > c[max_length]:
max_length += 1
c[max_length] = num
else:
next_idx = search(1, max_length, num)
c[next_idx] = num
return max_length
N = int(input().rstrip())
A = [int(n) for n in input().rstrip().split()]
print(lis(A))
| [
"caniro@naver.com"
] | caniro@naver.com |
6af8e050da68bfdedfdc86850a2cfb29c077ba0a | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Projects/twilio/twilio/rest/preview/trusted_comms/business/__init__.py | f8c935fdadfa88c9e90f8212fc00caf550491736 | [
"LicenseRef-scancode-other-permissive"
] | permissive | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:4568e5a8fab302e3c70ed11607b218146e4027e860e186373a1901bf7e49b1cc
size 8394
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
375a99607fd2f2f1a217329571e15ee926971bc9 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_135/1332.py | 14d48f941dcb48478886f954b0ba13b7112a23ce | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 610 | py | def line(f):
return f.readline().strip()
f = open("A-small-attempt0.in", "r")
o = open("1.out", "w")
T = int(line(f))
for t in xrange(T):
ans1 = int(line(f))
arr1 = []
for i in xrange(4):
arr1.append(map(int, line(f).split()))
ans2 = int(line(f))
arr2 = []
for i in xrange(4):
arr2.append(map(int, line(f).split()))
overlap = set(arr1[ans1-1]).intersection(set(arr2[ans2-1]))
if len(overlap) == 0:
s = "Case #%d: Volunteer cheated!" % (t+1)
elif len(overlap) == 1:
s = "Case #%d: %d" % (t+1, overlap.pop())
else:
s = "Case #%d: Bad magician!" % (t+1)
print>>o, s
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
45792ef3fd3e901732b4fa5547b889acb1f5ba55 | baf8ccd12b27d0882c75a9c3845a0679e831f618 | /22_numerai/rl/sarsa.py | b42dcf00bd73335c856a2ee0f4ee839362e9fd06 | [
"MIT"
] | permissive | Tjorriemorrie/trading | c55d545a0a09e3fb92673696e95dd66b02858ab6 | aafa15a6c564bfa86948ab30e33d554172b38a3e | refs/heads/master | 2022-12-13T20:57:23.591343 | 2021-07-07T20:28:34 | 2021-07-07T20:28:34 | 28,739,306 | 2 | 2 | MIT | 2022-07-06T20:01:28 | 2015-01-03T08:55:17 | q | UTF-8 | Python | false | false | 2,238 | py | import gzip
import logging
import operator
import os
import pickle
from world import World
logging.getLogger(__name__)
class Sarsa():
def __init__(self, filename):
self.filename = filename
self.world = World()
self.alpha = 0.
self.epsilon = self.alpha / 2.
self.delta = None
def __enter__(self):
try:
with gzip.open(self.filename) as fz:
q = pickle.load(fz)
except (IOError, EOFError) as e:
logging.warn('Could not load Q at {}'.format(self.filename))
q = {}
self.q = q
logging.debug('Q loaded')
def __exit__(self, exc_type, exc_value, traceback):
# filename_tmp = '{0}/models/tmp.pklz'.format(os.path.realpath(os.path.dirname(__file__)))
# filename = '{0}/models/{1}_{2}.pklz'.format(os.path.realpath(os.path.dirname(__file__)), currency, interval)
with gzip.open(self.filename, 'wb') as fz:
pickle.dump(self.q, fz)
# os.rename(filename_tmp, filename)
logging.debug('Q saved')
def train(self):
logging.info('training...')
# reset delta
self.delta = None
# initial state
s = getState(df, periods)
# initial action
a = getAction(q, s, epsilon, actions)
# get reward
r, ticks = getReward(df, a, pip_mul, std)
# get delta
d = getDelta(q, s, a, r)
# update Q
q = updateQ(q, s, a, d, r, alpha)
return q, r, d, ticks
def summarizeActions(q):
summary_total = {}
summary_count = {}
for key, value in q.iteritems():
state, action = key.split('|')
# total
action_total = summary_total.get(action, 0)
action_total += value
action_total /= 2
summary_total[action] = action_total
action_count = summary_count.get(action, 0)
action_count += 1
summary_count[action] = action_count
summary_sorted = sorted(summary_total.items(), key=operator.itemgetter(1))
for action, info in summary_sorted:
logging.error('{0:10s} after {2} states with {1:.4f} avg'.format(action, info, summary_count[action]))
| [
"jacoj82@gmail.com"
] | jacoj82@gmail.com |
c0b2dc52c6067fe4d6acf3ac56599bffd2491b3e | e3b9aa9b17ebb55e53dbc4fa9d1f49c3a56c6488 | /minfraud/komand_minfraud/actions/email_lookup/action.py | 9925063bfcaa673b4be293d6606ed91c7b12b331 | [
"MIT"
] | permissive | OSSSP/insightconnect-plugins | ab7c77f91c46bd66b10db9da1cd7571dfc048ab7 | 846758dab745170cf1a8c146211a8bea9592e8ff | refs/heads/master | 2023-04-06T23:57:28.449617 | 2020-03-18T01:24:28 | 2020-03-18T01:24:28 | 248,185,529 | 1 | 0 | MIT | 2023-04-04T00:12:18 | 2020-03-18T09:14:53 | null | UTF-8 | Python | false | false | 2,803 | py | import komand
from .schema import EmailLookupInput, EmailLookupOutput
# Custom imports below
import minfraud
class EmailLookup(komand.Action):
    """Komand action that queries MaxMind minFraud for email risk info.

    Builds a minFraud Insights request from a device IP plus optional
    email address/domain and returns the overall risk score together
    with the email-specific flags.
    """

    def __init__(self):
        super(self.__class__, self).__init__(
            name='email_lookup',
            description='Query email info',
            input=EmailLookupInput(),
            output=EmailLookupOutput())

    def run(self, params={}):
        """Execute the lookup.

        params keys: 'address' (device IP), 'domain' and 'email'
        (both optional).  Returns {'risk_score': str, 'email_result': dict}.
        Re-raises every minfraud error after logging it.
        """
        address = params.get('address')
        domain = params.get('domain')
        email = params.get('email')
        user = self.connection.user
        # NOTE: 'license' shadows the builtin of the same name.
        license = self.connection.license
        # Set client
        client = minfraud.Client(user, license)
        # Define request
        request = {'device': {'ip_address': address}}
        email_dic = {}
        if domain:
            email_dic['domain'] = domain
        if email:
            email_dic['address'] = email
        # Only attach the 'email' section when we actually have data for it
        if email_dic:
            request['email'] = email_dic
        else:
            self.logger.info('No email info provided')
        try:
            # Generate request
            insights = client.insights(request)
        except minfraud.AuthenticationError:
            self.logger.error('Authentication failed')
            raise
        except minfraud.InsufficientFundsError:
            self.logger.error('Insufficient funds')
            raise
        except minfraud.InvalidRequestError:
            self.logger.error('Invalid request')
            raise
        except minfraud.HttpError:
            self.logger.error('Unexpected HTTP error occurred')
            raise
        except minfraud.MinFraudError:
            self.logger.error('Unexpected content received from server')
            raise
        # Overall risk score, stringified for the output schema
        risk_score = str(insights.risk_score)
        # Email info flags from the Insights response
        is_free = insights.email.is_free
        is_high_risk = insights.email.is_high_risk
        email_result = {'is_free': is_free,
                        'is_high_risk': is_high_risk
                        }
        # Drop empty/None entries before returning
        email_result = komand.helper.clean_dict(email_result)
        return {'risk_score': risk_score, 'email_result': email_result}

    def test(self):
        """Connectivity check: fire a minimal request at the API."""
        user = self.connection.user
        license = self.connection.license
        # Set client
        client = minfraud.Client(user, license)
        # Define request
        request = {'device': {'ip_address': '8.8.8.8'}}
        try:
            # Generate request
            insights = client.insights(request)
        except minfraud.AuthenticationError:
            self.logger.error('Authentication failed')
            raise
        except minfraud.InsufficientFundsError:
            self.logger.error('Insufficient funds')
            raise
        return {}
| [
"jonschipp@gmail.com"
] | jonschipp@gmail.com |
2e0cec4b5e1bec814164ba1d46fcb45d8a657b93 | 42d3d37a3dd22402154da4f4bd020afd7b7bad58 | /examples/adspygoogle/adwords/v201109/basic_operations/add_ad_groups.py | af1d61fdd7c560ea8a6a05c35e2fac85f4c8c218 | [
"Apache-2.0"
] | permissive | nearlyfreeapps/python-googleadwords | 1388316ec4f8d9d6074688ec4742872b34b67636 | b30d90f74248cfd5ca52967e9ee77fc4cd1b9abc | refs/heads/master | 2020-06-03T23:05:08.865535 | 2012-08-02T21:46:16 | 2012-08-02T21:46:16 | 5,278,295 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,856 | py | #!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds ad groups to a given campaign. To get ad groups, run
get_ad_groups.py.
Tags: AdGroupService.mutate
"""
__author__ = 'api.kwinter@gmail.com (Kevin Winter)'
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import AdWordsClient
from adspygoogle.common import Utils
campaign_id = 'INSERT_CAMPAIGN_ID_HERE'
def main(client, campaign_id):
    """Add two manual-CPC ad groups to the given campaign and print them.

    Uses the v201109 AdGroupService against the AdWords sandbox.
    NOTE(review): the bare ``print`` near the end is Python-2-only
    syntax, so this script targets Python 2.
    """
    # Initialize appropriate service.
    ad_group_service = client.GetAdGroupService(
        'https://adwords-sandbox.google.com', 'v201109')
    # Construct operations and add ad groups.
    operations = [{
        'operator': 'ADD',
        'operand': {
            'campaignId': campaign_id,
            'name': 'Earth to Mars Cruises #%s' % Utils.GetUniqueName(),
            'status': 'ENABLED',
            'bids': {
                'xsi_type': 'ManualCPCAdGroupBids',
                'keywordMaxCpc': {
                    'amount': {
                        'microAmount': '1000000'
                    }
                },
                # Optional field.
                'keywordContentMaxCpc': {
                    'amount': {
                        'microAmount': '2000000'
                    }
                }
            }
        }
    }, {
        'operator': 'ADD',
        'operand': {
            'campaignId': campaign_id,
            'name': 'Earth to Venus Cruises #%s' % Utils.GetUniqueName(),
            'status': 'ENABLED',
            'bids': {
                'xsi_type': 'ManualCPCAdGroupBids',
                'keywordMaxCpc': {
                    'amount': {
                        'microAmount': '2000000'
                    }
                },
            }
        }
    }]
    ad_groups = ad_group_service.Mutate(operations)[0]
    # Display results.
    for ad_group in ad_groups['value']:
        print ('Ad group with name \'%s\' and id \'%s\' was added.'
               % (ad_group['name'], ad_group['id']))
    print
    print ('Usage: %s units, %s operations' % (client.GetUnits(),
                                               client.GetOperations()))
# Script entry point: build a sandbox client and run against the
# module-level campaign_id placeholder.
if __name__ == '__main__':
    # Initialize client object.
    client = AdWordsClient(path=os.path.join('..', '..', '..', '..', '..'))
    main(client, campaign_id)
| [
"ahalligan@nearlyfreehosting.com"
] | ahalligan@nearlyfreehosting.com |
57999ae9ce2381856766849022c89cd3e153c7e4 | 9b4fe9c2693abc6ecc614088665cbf855971deaf | /744.find-smallest-letter-greater-than-target.py | 49625d5f3841c1e6060e6f275b7326d894db8a48 | [
"MIT"
] | permissive | windard/leeeeee | e795be2b9dcabfc9f32fe25794878e591a6fb2c8 | 0dd67edca4e0b0323cb5a7239f02ea46383cd15a | refs/heads/master | 2022-08-12T19:51:26.748317 | 2022-08-07T16:01:30 | 2022-08-07T16:01:30 | 222,122,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,823 | py | # coding=utf-8
#
# @lc app=leetcode id=744 lang=python
#
# [744] Find Smallest Letter Greater Than Target
#
# https://leetcode.com/problems/find-smallest-letter-greater-than-target/description/
#
# algorithms
# Easy (43.56%)
# Likes: 245
# Dislikes: 376
# Total Accepted: 46.6K
# Total Submissions: 104.9K
# Testcase Example: '["c","f","j"]\n"a"'
#
#
# Given a list of sorted characters letters containing only lowercase letters,
# and given a target letter target, find the smallest element in the list that
# is larger than the given target.
#
# Letters also wrap around. For example, if the target is target = 'z' and
# letters = ['a', 'b'], the answer is 'a'.
#
#
# Examples:
#
# Input:
# letters = ["c", "f", "j"]
# target = "a"
# Output: "c"
#
# Input:
# letters = ["c", "f", "j"]
# target = "c"
# Output: "f"
#
# Input:
# letters = ["c", "f", "j"]
# target = "d"
# Output: "f"
#
# Input:
# letters = ["c", "f", "j"]
# target = "g"
# Output: "j"
#
# Input:
# letters = ["c", "f", "j"]
# target = "j"
# Output: "c"
#
# Input:
# letters = ["c", "f", "j"]
# target = "k"
# Output: "c"
#
#
#
# Note:
#
# letters has a length in range [2, 10000].
# letters consists of lowercase letters, and contains at least 2 unique
# letters.
# target is a lowercase letter.
#
#
#
class Solution(object):
    def nextGreatestLetter(self, letters, target):
        """
        Return the smallest letter strictly greater than target,
        wrapping around past 'z' back to the start of the list.

        :type letters: List[str]
        :type target: str
        :rtype: str
        """
        best_char = None
        best_dist = 27  # a real wrap-around distance is at most 25
        for candidate in letters:
            # circular distance from target to candidate; 0 means the
            # candidate equals the target and is never a valid answer
            dist = (ord(candidate) - ord(target)) % 26
            if dist and dist < best_dist:
                best_dist = dist
                best_char = candidate
        return best_char
| [
"windard@qq.com"
] | windard@qq.com |
52c105db51a9729ca761c5db76853562fb4dd51a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03044/s052008101.py | 8dc7ecea338f828c998221cffb71731fd4019ce9 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 768 | py | import sys
from collections import deque
read = sys.stdin.read
readline = sys.stdin.readline
readlines = sys.stdin.readlines
sys.setrecursionlimit(10 ** 9)
INF = 1 << 60
def main():
    """Two-color the nodes of a weighted tree by path-length parity.

    Reads N and N-1 weighted edges (1-based endpoints) from stdin, BFSes
    from node 0 accumulating edge weights, and prints one line per node:
    0 when its distance from the root is even, 1 when odd.
    """
    N = int(readline())
    G = [[] for _ in range(N)]
    for _ in range(N - 1):
        u, v, w = map(int, readline().split())
        G[u - 1].append((v - 1, w))
        G[v - 1].append((u - 1, w))
    dist = [-1] * N  # -1 marks "not visited yet"
    color = [0] * N
    dist[0] = 0
    queue = deque([0])
    while queue:
        v = queue.popleft()
        for nv, cost in G[v]:
            if dist[nv] == -1:
                dist[nv] = dist[v] + cost
                color[nv] = dist[nv] % 2
                queue.append(nv)
    print(*color, sep='\n')
    return
# Script entry point.
if __name__ == '__main__':
    main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
069e8afc3bae88fc490dc7db80adf1c3c2ff5992 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2375/60595/257705.py | dfc1e79a8269cf592621f58eb91b8d16b18a863c | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,701 | py | class Graph(object):
def __init__(self, maps):
self.maps = maps
self.nodenum = self.get_nodenum()
self.edgenum = self.get_edgenum()
def get_nodenum(self):
return len(self.maps)
def get_edgenum(self):
count = 0
for i in range(self.nodenum):
for j in range(i):
if self.maps[i][j] > 0:
count += 1
return count
def prim(self):
list = []
if self.nodenum <= 0 or self.edgenum < self.nodenum - 1:
return list
selected_node = [0]
candidate_node = [i for i in range(1, self.nodenum)]
while len(candidate_node) > 0:
begin, end, minweight = 0, 0, 9999
for i in selected_node:
for j in candidate_node:
if self.maps[i][j] < minweight:
minweight = self.maps[i][j]
begin = i
end = j
list.append([begin, end, minweight])
selected_node.append(end)
candidate_node.remove(end)
return list
def Test():
    """Read a graph from stdin and print the largest MST edge weight.

    Input: first line "n m", then m lines "u v w" with 1-based
    endpoints.  Missing edges default to weight 99999.  Prints the
    bottleneck (maximum) weight of the minimum spanning tree, without a
    trailing newline.
    """
    n, m = map(int, input().split())
    mat = []
    for i in range(0, n):
        line = []
        for j in range(0, n):
            line.append(99999)
        mat.append(line)
    for i in range(0, m):
        s = input().split()
        try:
            mat[int(s[0]) - 1][int(s[1]) - 1] = int(s[2])
            mat[int(s[1]) - 1][int(s[0]) - 1] = int(s[2])
        except:
            # NOTE(review): bare except prints the sizes instead of
            # failing on malformed input -- looks like leftover
            # debugging; confirm intent.
            print(n, m)
    graph = Graph(mat)
    message = graph.prim()
    res = 0
    for i in range(0, len(message)):
        res = max(res, message[i][2])
    print(res, end="")
# Script entry point.
if __name__ == "__main__":
    Test()
"1069583789@qq.com"
] | 1069583789@qq.com |
615e70e685775ea91236d4f9d8bf8ffa6acd6d50 | 9e28200b71d43de1e122a964e88f1b547bfde465 | /question_leetcode/159_3.py | ac9e41c85595c93128f7e311a207156c3c39e650 | [] | no_license | paul0920/leetcode | 6f8a7086eefd3e9bccae83752ef41cbfee1acaea | 474886c5c43a6192db2708e664663542c2e39548 | refs/heads/master | 2023-08-19T14:10:10.494355 | 2021-09-16T20:26:50 | 2021-09-16T20:26:50 | 290,560,326 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | import collections
# Longest substring with at most two distinct characters (LeetCode 159),
# via a sliding window over s.
s = "ecebaa"
s = "bacc"  # second assignment wins; the first is kept as a spare case

start = 0
count = collections.defaultdict(int)
res = 0
res_string = []  # window snapshot after every step, for inspection
for idx, c in enumerate(s):
    count[c] += 1
    # More than two distinct chars in the window: advance the left edge.
    if len(count) > 2:
        count[s[start]] -= 1
        if not count[s[start]]:
            count.pop(s[start])
        start += 1
    res = max(res, idx - start + 1)
    res_string.append(s[start:idx + 1])

# print() calls instead of the original Python-2-only print statements,
# so the script runs unchanged on both Python 2 and 3.
print(res)
print(res_string)
| [
"39969716+paul0920@users.noreply.github.com"
] | 39969716+paul0920@users.noreply.github.com |
853bd821d4c8c5ac1a86b930a9840d78d132224a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02873/s837577213.py | b7d5cafe31946f81d03165a317e1a59b8ade8854 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | s=input()
l=[]
i=0
while i<len(s):
k=0
while s[i]=='<' if i<len(s) else False:
k+=1
i+=1
if k>0:
l.append(k)
k=0
while s[i]=='>' if i<len(s) else False:
k+=1
i+=1
if k>0:
l.append(k)
sm=0
for i in l:
sm+=(i*(i+1))//2
for i in range(0 if s[0]=='<' else 1,len(l)-1,2):
sm-=min(l[i],l[i+1])
print(sm) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
0986e209f3f4491736d25bff9acd114e0c92e812 | 4c9eb8584b16bb103a1401a8f297f62589941c01 | /flo/cli/Rdr2Geo.py | 37d70f06ef8cebd48f5ae3fad9407d2f1276678c | [] | no_license | pyre/flo | d62e0bba61926fd395df1c2767198c5743ade531 | 7b61a7a4cf12d4448b99f1b841866fe31a27bb61 | refs/heads/master | 2023-03-08T11:21:55.874526 | 2021-09-28T06:47:10 | 2021-09-28T06:47:10 | 156,036,991 | 5 | 0 | null | 2023-02-28T17:42:13 | 2018-11-04T00:51:52 | JavaScript | UTF-8 | Python | false | false | 509 | py | #-*- coding: utf-8 -*-
# support
import flo
# superclass
from .Workflow import Workflow
# declaration
class Rdr2Geo(Workflow, family="flo.cli.rdr2geo"):
    """
    Invoke the {rdr2geo} workflow to compute the transformation from radar coordinates to
    geodetic coordinates for a given SLC
    """

    # public state
    # pyre trait holding the workflow to run; defaults to the isce3
    # rdr2geo workflow that shares this component's name
    flow = flo.model.flows.flow()
    flow.default = flo.isce3.workflows.rdr2geo  # by default, make the one named after me...
    flow.doc = "the workflow to execute"
# end of file
| [
"michael.aivazis@para-sim.com"
] | michael.aivazis@para-sim.com |
cd1eb7e40c810db20c3ae7b49d3798be2f3e58b5 | 34597ad1d89ee507473c5d91f03a5819143ec48f | /EBOs/UserV1/model.py | ab856ee1f233ace7266afeb7b415be1894a6ca4b | [] | no_license | rmetcalf9/dockPondSampleEBOs | 082c3a18961665e02402f0f14e3180019fc75bde | abd8d973feee03bcbf52938d6364c93d38aa2d5c | refs/heads/master | 2020-03-12T16:26:11.636502 | 2018-06-29T10:58:17 | 2018-06-29T10:58:17 | 130,716,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,328 | py |
from flask_restplus import fields
def getModel(flaskRestPlusAPI):
    """Return the flask-restplus model describing the UserEBOV1 payload.

    Only this outer function is imported by dockPond, which is why the
    type-resolution helper is nested inside it.
    """
    # Function must be declared inside getModel function as this is the only
    # part that is imported by dockPond
    def getTypeModel(flaskRestPlusAPI, typeName):
        # Resolve a fully-qualified EBO type URI to its restplus model,
        # recursing for nested types.
        if typeName=='http://ic.ac.uk/AIAMetaData/AIAComponents/EnterpriseObjectLibrary/Core/IC_EBO/User/V1/UserEBO:UserEBOTypeV1':
            return flaskRestPlusAPI.model('UserEBOTypeV1', {
                'Identification': fields.Nested(getTypeModel(flaskRestPlusAPI, 'http://ic.ac.uk/AIAMetaData/AIAComponents/EnterpriseObjectLibrary/Core/IC_EBO/User/V1/UserEBO:IdentificationTypeV1')),
                'CID': fields.String(default='',description='College CID'),
                'Status': fields.String(default='',description='Status of the User'),
            })
        if typeName=='http://ic.ac.uk/AIAMetaData/AIAComponents/EnterpriseObjectLibrary/Core/IC_EBO/User/V1/UserEBO:IdentificationTypeV1':
            return flaskRestPlusAPI.model('IdentificationTypeV1', {
                'UserName': fields.String(default='',description='Cannonical User identifier'),
            })
        # Unknown type names are programming errors, not data errors.
        raise Exception('Searching for unknown type')
    return flaskRestPlusAPI.model('UserEBOV1', {
        'UserEBO': fields.Nested(getTypeModel(flaskRestPlusAPI, 'http://ic.ac.uk/AIAMetaData/AIAComponents/EnterpriseObjectLibrary/Core/IC_EBO/User/V1/UserEBO:UserEBOTypeV1')),
    })
| [
"rmetcalf9@googlemail.com"
] | rmetcalf9@googlemail.com |
02cca8d92f564c91c6c3d266eaef9202830aaabd | 2fdc719bea50f10e2a4fc507d25b83ff4e612071 | /projects/buck/bucklets/tools/download_all.py | a70cbda5baa067b219192369df1ce9371cbd8098 | [
"Apache-2.0"
] | permissive | aslamz/appium | 5610b61598b5d74a41c43b2d6729f21f6978b7c8 | 778fe9c92041c99f06d9d074caed2f9c61c8bbb0 | refs/heads/master | 2022-06-01T18:46:07.210870 | 2021-01-04T12:56:25 | 2021-01-04T12:56:25 | 40,705,347 | 0 | 0 | Apache-2.0 | 2022-05-20T20:52:31 | 2015-08-14T08:53:27 | Ruby | UTF-8 | Python | false | false | 1,306 | py | #!/usr/bin/python
# Copyright (C) 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from optparse import OptionParser
import re
from subprocess import check_call, CalledProcessError, Popen, PIPE
MAIN = ['//:classpath']
# Matches edges in buck's dot output that point at the download_file rule.
PAT = re.compile(r'"(//.*?)" -> "//bucklets/tools:download_file"')

opts = OptionParser()
opts.add_option('--src', action='store_true')
args, _ = opts.parse_args()

targets = set()
# Walk the classpath dependency graph and collect every target that is
# fetched through download_file.
p = Popen(['buck', 'audit', 'classpath', '--dot'] + MAIN, stdout = PIPE)
for line in p.stdout:
  m = PAT.search(line)
  if m:
    n = m.group(1)
    # With --src, swap the binary artifact target for its source jar.
    if args.src and n.endswith('__download_bin'):
      n = n[:-4] + '_src'
    targets.add(n)
r = p.wait()
if r != 0:
  exit(r)

# Build (and thereby download) everything that was collected.
try:
  check_call(['buck', 'build'] + sorted(targets))
except CalledProcessError as err:
  exit(1)
| [
"code@bootstraponline.com"
] | code@bootstraponline.com |
5d60063af802f6cb1f0a9b6e580171f272016318 | 9ce3080999a69f1d330356645fe3e655052cf954 | /aiida_registry/make_pages.py | 0c4911cf752d6d8c8ef644290e6d20c49269cc15 | [] | no_license | chrisjsewell/aiida-registry | b0969b8298e8e5108653ec56ac54a8807e3cc1e6 | a2cc2cf6c61e835e535d6af6125efcdf7dcae33b | refs/heads/master | 2021-06-16T10:17:19.887994 | 2019-10-30T18:00:55 | 2019-10-30T18:00:55 | 148,847,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,634 | py | # -*- coding: utf-8 -*-
"""Generate HTML pages for plugin registry.
Reads plugin-metadata.json produced by fetch_metadata.
"""
from __future__ import absolute_import
from __future__ import print_function
import codecs
import json
import os
import shutil
from collections import defaultdict
from jinja2 import Environment, PackageLoader, select_autoescape
from . import othercolorclass, entrypoint_metainfo, main_entrypoints, PLUGINS_METADATA, entrypointtypes, state_dict
# Subfolders
OUT_FOLDER = 'out'
STATIC_FOLDER = 'static'
HTML_FOLDER = 'plugins'  # Name for subfolder where HTMLs for plugins are going to be sitting
TEMPLATES_FOLDER = 'templates'

# Absolute paths
pwd = os.path.split(os.path.abspath(__file__))[0]
STATIC_FOLDER_ABS = os.path.join(pwd, STATIC_FOLDER)

# Module-level accumulators: filled by get_summary_info() during the
# per-plugin pass and read back by global_summary().
entrypoints_count = defaultdict(list)
other_entrypoint_names = set()
def get_html_plugin_fname(plugin_name):
    """Turn a plugin name into a safe HTML file name.

    Every character outside [A-Za-z0-9_-] is dropped before the '.html'
    suffix is appended.
    """
    import string
    allowed = set(string.ascii_letters + string.digits + '_-')
    safe_name = "".join(ch for ch in plugin_name if ch in allowed)
    return "{}.html".format(safe_name)
def get_summary_info(entry_points):
    """Get info for plugin detail page.

    Builds the per-plugin list of {colorclass, text, count} badges, one
    per known entry point group plus a pooled "Other" badge.  As a side
    effect, updates the module-level entrypoints_count and
    other_entrypoint_names accumulators used by global_summary().
    """
    global entrypoints_count, other_entrypoint_names
    summary_info = []
    ep = entry_points.copy()

    # Known entry point groups first, in their canonical order.
    for entrypoint_name in main_entrypoints:
        try:
            num = len(ep.pop(entrypoint_name))
            if num > 0:
                summary_info.append({
                    "colorclass":
                    entrypoint_metainfo[entrypoint_name]['colorclass'],
                    "text":
                    entrypoint_metainfo[entrypoint_name]['shortname'],
                    "count":
                    num
                })
                entrypoints_count[entrypoint_name].append(num)
        except KeyError:
            # No specific entrypoints of this group for this plugin; skip.
            pass

    # Check remaining non-empty entrypoints
    remaining = [ep_name for ep_name in ep if ep[ep_name]]
    remaining_count = [len(ep[ep_name]) for ep_name in ep if ep[ep_name]]
    total_count = sum(remaining_count)
    if total_count:
        other_elements = []
        for ep_name in remaining:
            try:
                other_elements.append(
                    entrypoint_metainfo[ep_name]['shortname'])
            except KeyError:
                # Unknown group: fall back to a prettified raw name.
                for strip_prefix in ['aiida.']:
                    if ep_name.startswith(strip_prefix):
                        ep_name = ep_name[len(strip_prefix):]
                        break
                other_elements.append(
                    ep_name.replace('_', ' ').replace('.', ' ').capitalize())

        summary_info.append({
            "colorclass":
            othercolorclass,
            "text":
            'Other ({})'.format(format_entry_points_list(other_elements)),
            "count":
            total_count
        })
        entrypoints_count['other'].append(total_count)
        other_entrypoint_names.update(other_elements)

    return summary_info
def format_entry_points_list(ep_list):
    """Return a comma-separated, sorted string of entry point names.

    At most five names are shown; longer lists are truncated and
    suffixed with an ellipsis.
    """
    max_len = 5
    # sorted() already returns a fresh list, so the copy.copy() the
    # original made first was redundant.
    names = sorted(ep_list)
    if len(names) > max_len:
        names = names[:max_len] + ['...']
    return ", ".join(names)
def validate_plugin_entry_points(plugin_data):
    """Warn about entry points whose names don't share the declared root."""
    # A plugin without a declared root can never match, which makes the
    # problem visible in every warning line.
    root = plugin_data.get('entry_point', 'MISSING')
    for ep_list in plugin_data['entry_points'].values():
        for entry in ep_list:
            name, _target = entry.split('=')
            name = name.strip()
            if not name.startswith(root):
                print(
                    " >> WARNING: Entry point '{}' does not start with '{}'".
                    format(name, root))
def global_summary():
    """Compute summary of plugin registry.

    Aggregates the module-level entrypoints_count accumulator (filled by
    get_summary_info during the per-plugin pass) into one row per entry
    point group for the front page.  The local list deliberately shadows
    the function's own name, but only inside this scope.
    """
    global entrypoints_count, other_entrypoint_names

    global_summary = []
    for entrypoint_name in main_entrypoints:
        global_summary.append({
            'name':
            entrypoint_metainfo[entrypoint_name]['shortname'],
            'colorclass':
            entrypoint_metainfo[entrypoint_name]['colorclass'],
            'num_entries':
            len(entrypoints_count[entrypoint_name]),
            'total_num':
            sum(entrypoints_count[entrypoint_name]),
        })

    # Pooled row for every group not listed in main_entrypoints.
    global_summary.append({
        'name':
        "Other",
        'tooltip':
        format_entry_points_list(other_entrypoint_names),
        'colorclass':
        othercolorclass,
        'num_entries':
        len(entrypoints_count['other']),
        'total_num':
        sum(entrypoints_count['other'])
    })

    return global_summary
def make_pages():
    """Render the registry website into OUT_FOLDER.

    Wipes any previous output, copies the static assets, renders one
    HTML page per plugin from plugin-metadata.json, then renders the
    main index with the accumulated global summary.
    """
    # Create output folder, copy static files
    if os.path.exists(OUT_FOLDER):
        shutil.rmtree(OUT_FOLDER)
    os.mkdir(OUT_FOLDER)
    os.mkdir(os.path.join(OUT_FOLDER, HTML_FOLDER))
    shutil.copytree(STATIC_FOLDER_ABS, os.path.join(OUT_FOLDER, STATIC_FOLDER))

    env = Environment(
        loader=PackageLoader('aiida_registry.mod'),
        autoescape=select_autoescape(['html', 'xml']),
    )

    with open(PLUGINS_METADATA) as f:
        plugins_metadata = json.load(f)

    # Create HTML view for each plugin
    for plugin_name, plugin_data in plugins_metadata.items():
        print(" - {}".format(plugin_name))

        subpage = os.path.join(HTML_FOLDER, get_html_plugin_fname(plugin_name))
        subpage_abspath = os.path.join(OUT_FOLDER, subpage)

        plugin_data['subpage'] = subpage
        plugin_data[
            'entrypointtypes'] = entrypointtypes  # add a static entrypointtypes dictionary
        plugin_data["summaryinfo"] = get_summary_info(
            plugin_data["entry_points"])
        plugin_data['state_dict'] = state_dict

        # Write plugin html
        plugin_html = env.get_template("singlepage.html").render(**plugin_data)
        with codecs.open(subpage_abspath, 'w', 'utf-8') as f:
            f.write(plugin_html)
        print(" - Page {} generated.".format(subpage))

    all_data = {}
    all_data['plugins'] = plugins_metadata
    all_data['globalsummary'] = global_summary()

    print("[main index]")
    rendered = env.get_template("main_index.html").render(**all_data)
    outfile = os.path.join(OUT_FOLDER, 'index.html')
    with codecs.open(outfile, 'w', 'utf-8') as f:
        f.write(rendered)
    print(" - index.html generated")
| [
"leopold.talirz@gmail.com"
] | leopold.talirz@gmail.com |
54af66ff4d6027355a3710a71ff0203770426322 | c81d7dfef424b088bf2509a1baf406a80384ea5a | /venv/Lib/site-packages/whitenoise/httpstatus_backport.py | fcb1c22f1d45ec7f7fc3b25ffc361c1df72b45bc | [] | no_license | Goutham2591/OMK_PART2 | 111210d78fc4845481ed55c852b8f2f938918f4a | cb54fb21ebf472bffc6ee4f634bf1e68303e113d | refs/heads/master | 2022-12-10T01:43:08.213010 | 2018-04-05T02:09:41 | 2018-04-05T02:09:41 | 124,828,094 | 0 | 1 | null | 2022-12-07T23:43:03 | 2018-03-12T03:20:14 | Python | UTF-8 | Python | false | false | 558 | py | """
Very partial backport of the `http.HTTPStatus` enum from Python 3.5
This implements just enough of the interface for our purposes, it does not
attempt to be a full implementation.
"""
class HTTPStatus(int):
    """An int subclass that also carries the HTTP reason phrase.

    Minimal stand-in for Python 3.5's http.HTTPStatus enum; stays
    Python-2 compatible by calling int.__new__ directly.
    """

    phrase = None

    def __new__(cls, code, phrase):
        status = int.__new__(cls, code)
        status.phrase = phrase
        return status


# The only statuses WhiteNoise actually needs from the real enum.
HTTPStatus.OK = HTTPStatus(200, 'OK')
HTTPStatus.NOT_MODIFIED = HTTPStatus(304, 'Not Modified')
HTTPStatus.METHOD_NOT_ALLOWED = HTTPStatus(405, 'Method Not Allowed')
| [
"amatar@unomaha.edu"
] | amatar@unomaha.edu |
39cd46f95479b5459cef6c53ce8edc1945642153 | 79bb7105223895235263fd391906144f9f9645fd | /python/kernel_tests/identity_op_py_test.py | 7cde987900cb2e034c0d925eba85540adc313147 | [] | no_license | ml-lab/imcl-tensorflow | f863a81bfebe91af7919fb45036aa05304fd7cda | 54ab3ec2e32087ce70ecae2f36b56a8a92f2ba89 | refs/heads/master | 2021-01-22T06:37:18.129405 | 2016-06-08T15:53:28 | 2016-06-08T15:53:28 | 63,518,098 | 1 | 2 | null | 2016-07-17T06:29:14 | 2016-07-17T06:29:13 | null | UTF-8 | Python | false | false | 2,365 | py | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for IdentityOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import gen_array_ops
class IdentityOpTest(tf.test.TestCase):
  """Tests that tf.identity preserves values, dtypes and static shapes."""

  def testInt32_6(self):
    # identity of a rank-1 int32 list returns the same values
    with self.test_session():
      value = tf.identity([1, 2, 3, 4, 5, 6]).eval()
      self.assertAllEqual(np.array([1, 2, 3, 4, 5, 6]), value)

  def testInt32_2_3(self):
    # identity preserves an explicitly reshaped 2x3 tensor
    with self.test_session():
      inp = tf.constant([10, 20, 30, 40, 50, 60], shape=[2, 3])
      value = tf.identity(inp).eval()
      self.assertAllEqual(np.array([[10, 20, 30], [40, 50, 60]]), value)

  def testString(self):
    # byte strings pass through unchanged
    source = [b"A", b"b", b"C", b"d", b"E", b"f"]
    with self.test_session():
      value = tf.identity(source).eval()
      self.assertAllEqual(source, value)

  def testIdentityShape(self):
    # static shape inference is preserved for tensors, lists and ndarrays
    with self.test_session():
      shape = [2, 3]
      array_2x3 = [[1, 2, 3], [6, 5, 4]]
      tensor = tf.constant(array_2x3)
      self.assertEquals(shape, tensor.get_shape())
      self.assertEquals(shape, tf.identity(tensor).get_shape())
      self.assertEquals(shape, tf.identity(array_2x3).get_shape())
      self.assertEquals(shape, tf.identity(np.array(array_2x3)).get_shape())

  def testRefIdentityShape(self):
    # the internal ref_identity op also preserves static shape on a Variable
    with self.test_session():
      shape = [2, 3]
      tensor = tf.Variable(tf.constant([[1, 2, 3], [6, 5, 4]], dtype=tf.int32))
      self.assertEquals(shape, tensor.get_shape())
      self.assertEquals(shape, gen_array_ops._ref_identity(tensor).get_shape())
# Run the test suite when executed directly.
if __name__ == "__main__":
  tf.test.main()
| [
"mrlittlezhu@gmail.com"
] | mrlittlezhu@gmail.com |
00c674bec719f04e064532c7307ee71bc50f8bbc | 8b6cd902deb20812fba07f1bd51a4460d22adc03 | /back-end/.history/djreact/users/serializers_20191221131418.py | 4a84b43397e2a944a5fd21996d7d0d6712fd600d | [] | no_license | vishaldenzil/Django-react- | f3a49d141e0b6882685b7eaa4dc43c84857f335a | 35b6d41f6dacb3bddcf7858aa4dc0d2fe039ff98 | refs/heads/master | 2022-11-08T09:27:02.938053 | 2020-05-29T04:53:52 | 2020-05-29T04:53:52 | 267,768,028 | 0 | 1 | null | 2022-10-15T14:08:30 | 2020-05-29T04:52:20 | Python | UTF-8 | Python | false | false | 190 | py | from rest_framework import serializers
from .models import User
class UserRegistrationSerializer(serializers.ModelSerializer):
    """Serializer exposing every field of the custom User model."""

    class Meta:
        model = User
        # DRF expects the string literal '__all__' here; the bare name
        # __all__ used before raised NameError at class-definition time.
        fields = '__all__'
| [
"vishal.denzil@ezedox.com"
] | vishal.denzil@ezedox.com |
f89422d908d4ded0742b533ea5c45917262a21e9 | e47b6d86c2309c857c9af4e84ff2e30455030681 | /Bridge.py | 0c456d90f8e0be7e8fb10b816da313a991482ee8 | [] | no_license | bigeyesung/DesignPattern | 39aec1d9c549ec7fce5bfe5a67a65267692786d8 | 4d2e48f6f053b5a9b6a87e73cdb79c5978592ab6 | refs/heads/master | 2020-08-17T11:05:42.104343 | 2020-07-07T20:02:42 | 2020-07-07T20:02:42 | 215,656,773 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,109 | py | from abc import ABC, abstractmethod
class Abstraction:
    """Client-facing half of the Bridge: delegates to an Implementation.

    The parameter annotation is a string (forward reference) because the
    Implementation ABC is defined further down in this module; the bare
    name raised NameError at class-definition time.
    """

    def __init__(self, implementation: "Implementation"):
        self.implementation = implementation

    def operation(self):
        # Delegate the actual work to the wired-in implementor.
        return self.implementation.operation_implementation()
class ExtendedAbstraction(Abstraction):
    """Variant abstraction for the Bridge demo.

    NOTE(review): this override is byte-identical to
    Abstraction.operation; in the canonical sample the extended variant
    changes the call.  Presumably it was meant to differ -- confirm.
    """
    def operation(self):
        return self.implementation.operation_implementation()
class Implementation(ABC):
    """Implementor interface of the Bridge pattern."""

    @abstractmethod
    def operation_implementation(self):
        # Concrete platforms return their platform-specific string here.
        pass
class ConcreteImplementationA(Implementation):
    """Platform-A implementor."""
    def operation_implementation(self):
        return "platform A."
class ConcreteImplementationB(Implementation):
    """Platform-B implementor."""
    def operation_implementation(self):
        return "platform B."
def client_code(abstraction: Abstraction):
    """Drive whatever concrete Abstraction it is handed."""
    result = abstraction.operation()
    print(result, end="")
if __name__ == "__main__":
    # Run the same client against both platforms to show that the
    # abstraction and the implementation vary independently.
    implementation = ConcreteImplementationA()
    abstraction = Abstraction(implementation)
    client_code(abstraction)
    print("\n")

    implementation = ConcreteImplementationB()
    abstraction = ExtendedAbstraction(implementation)
    client_code(abstraction)
"sungchenhsi@gmail.com"
] | sungchenhsi@gmail.com |
ee92648ad5b8a4be878dc87469075f80bd3a442d | cdd79cef15bdf6a0b9098e27028bbe38607bc288 | /蟻本/2-3_最長共通部分文字列問題_配るDP.py | d9e557cf8591cc2a57a19eb9d8c300f6120fd617 | [] | no_license | nord2sudjp/atcoder | ee35a3eb35717485dc62627172de24c9dac102fb | 6b1cc5102a615492cc7ff8a33813bbb954641782 | refs/heads/master | 2023-08-25T11:27:14.205593 | 2021-09-27T05:43:04 | 2021-09-27T05:43:04 | 302,855,505 | 0 | 0 | null | null | null | null | SHIFT_JIS | Python | false | false | 620 | py | N,M=map(int,input().split())
S=input()
T=input()
MAX_N=N+2
MAX_M=M+2
DP=[[0]*(MAX_N) for _ in range(MAX_M)]
#DP[i+1][j+1] : S[i]T[j]に対するLCSの長さ
for i in range(N):
for j in range(M):
# i,jは文字列としては現在を見ている
# DPとしては過去のDPを見ている
# DP[i][j]は文字列S[i]T[j]までの共通文字列の長さを表す
DP[i][j+1]=max(DP[i][j+1],DP[i][j])
DP[i+1][j]=max(DP[i+1][j],DP[i][j])
if S[i]==T[j]:
DP[i+1][j+1]=max(DP[i+1][j+1],DP[i][j]+1) #dp[i][j]までの長さに1を足した物
print(DP[N][M]) | [
"nord2sudjp@gmail.com"
] | nord2sudjp@gmail.com |
8055239902f815052d3b4a078afeb5a0d13730b7 | 459929ce79538ec69a6f8c32e608f4e484594d68 | /venv/Lib/site-packages/kubernetes/client/models/extensions_v1beta1_ingress_backend.py | efa600d193b4a86f19a2dcc154c8bf3990938050 | [] | no_license | yychai97/Kubernetes | ec2ef2a98a4588b7588a56b9d661d63222278d29 | 2955227ce81bc21f329729737b5c528b02492780 | refs/heads/master | 2023-07-02T18:36:41.382362 | 2021-08-13T04:20:27 | 2021-08-13T04:20:27 | 307,412,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,474 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: release-1.15
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class ExtensionsV1beta1IngressBackend(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> OpenAPI type string.
    openapi_types = {
        'service_name': 'str',
        'service_port': 'object'
    }
    # Python attribute name -> JSON key used on the wire.
    attribute_map = {
        'service_name': 'serviceName',
        'service_port': 'servicePort'
    }
    def __init__(self, service_name=None, service_port=None):  # noqa: E501
        """ExtensionsV1beta1IngressBackend - a model defined in OpenAPI"""  # noqa: E501
        self._service_name = None
        self._service_port = None
        self.discriminator = None
        # Assign through the properties so their None-validation runs.
        self.service_name = service_name
        self.service_port = service_port
    @property
    def service_name(self):
        """Gets the service_name of this ExtensionsV1beta1IngressBackend.  # noqa: E501
        Specifies the name of the referenced service.  # noqa: E501
        :return: The service_name of this ExtensionsV1beta1IngressBackend.  # noqa: E501
        :rtype: str
        """
        return self._service_name
    @service_name.setter
    def service_name(self, service_name):
        """Sets the service_name of this ExtensionsV1beta1IngressBackend.
        Specifies the name of the referenced service.  # noqa: E501
        :param service_name: The service_name of this ExtensionsV1beta1IngressBackend.  # noqa: E501
        :type: str
        """
        # Required field: rejecting None keeps the serialized object valid.
        if service_name is None:
            raise ValueError("Invalid value for `service_name`, must not be `None`")  # noqa: E501
        self._service_name = service_name
    @property
    def service_port(self):
        """Gets the service_port of this ExtensionsV1beta1IngressBackend.  # noqa: E501
        Specifies the port of the referenced service.  # noqa: E501
        :return: The service_port of this ExtensionsV1beta1IngressBackend.  # noqa: E501
        :rtype: object
        """
        return self._service_port
    @service_port.setter
    def service_port(self, service_port):
        """Sets the service_port of this ExtensionsV1beta1IngressBackend.
        Specifies the port of the referenced service.  # noqa: E501
        :param service_port: The service_port of this ExtensionsV1beta1IngressBackend.  # noqa: E501
        :type: object
        """
        # Required field: rejecting None keeps the serialized object valid.
        if service_port is None:
            raise ValueError("Invalid value for `service_port`, must not be `None`")  # noqa: E501
        self._service_port = service_port
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize any nested model objects in lists.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize any nested model objects in dict values.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ExtensionsV1beta1IngressBackend):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"49704239+yychai97@users.noreply.github.com"
] | 49704239+yychai97@users.noreply.github.com |
bbb90548f8bac0d1b4062e9b26e835791376a92c | a94c446a0d9ce77df965674f63be54d54b2be577 | /raspy/components/potentiometers/microchip/register_memory_address.py | 2c00349ad860e9bab97439a83e7dadd9917a5182 | [
"MIT"
] | permissive | cyrusbuilt/RasPy | 3434e02c2bff09ef9f3ff4995bda14edc781c14b | 1e34840cc90ea7f19317e881162209d3d819eb09 | refs/heads/master | 2020-03-18T20:19:27.426002 | 2018-08-03T17:07:25 | 2018-08-03T17:07:25 | 135,207,376 | 0 | 0 | MIT | 2018-08-03T17:07:26 | 2018-05-28T20:42:17 | Python | UTF-8 | Python | false | false | 480 | py | """Register memory addresses."""
WIPER0 = 0x00
"""Wiper 0."""

WIPER1 = 0x01
"""Wiper 1."""

WIPER0_NV = 0x02
"""Wiper 0 non-volatile."""

WIPER1_NV = 0x03
"""Wiper 1 non-volatile."""

TCON01 = 0x04
"""Terminal control for wipers 0 and 1."""

WIPER2 = 0x06
"""Wiper 2."""

WIPER3 = 0x07
"""Wiper 3."""

WIPER2_NV = 0x08
"""Wiper 2 non-volatile."""

WIPER3_NV = 0x09
"""Wiper 3 non-volatile."""

# Fixed: was 0x04, which collided with TCON01.  Per the Microchip
# MCP443X/445X register map, the terminal-control register for
# wipers 2 and 3 (TCON1) is at address 0x0A.
TCON23 = 0x0A
"""Terminal control for wipers 2 and 3."""

NONE = 0
"""Null bit."""
| [
"cyrusbuilt@gmail.com"
] | cyrusbuilt@gmail.com |
e7b7bc0fa2a5b32fb56f559e5bdd1a625c0572ed | 8f439e50c741483ffefd5bad16f11d4b60da8fe9 | /examples/infomax_transductive.py | 785c7864d2eb6dd43726820bbc8b4e4abf238b6c | [
"MIT"
] | permissive | sumanthratna/pytorch_geometric | 19d66b6cc874fbce9207efc204a0ed1f9bb04d88 | 9c6a069c995cac38e4f3a2f1e9cfc7cebac889c6 | refs/heads/master | 2023-08-29T09:58:33.807755 | 2021-09-08T16:00:09 | 2021-09-08T16:00:09 | 404,423,682 | 2 | 0 | MIT | 2021-09-08T20:58:23 | 2021-09-08T16:44:15 | null | UTF-8 | Python | false | false | 1,720 | py | import os.path as osp
import torch
import torch.nn as nn
from torch_geometric.datasets import Planetoid
from torch_geometric.nn import GCNConv, DeepGraphInfomax
# Download/load the Cora citation graph into ../data/Cora.
dataset = 'Cora'
path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', dataset)
dataset = Planetoid(path, dataset)  # rebinds `dataset` from name string to Dataset object
class Encoder(nn.Module):
    """Single-layer GCN encoder with a parametric ReLU non-linearity."""

    def __init__(self, in_channels, hidden_channels):
        super(Encoder, self).__init__()
        # cached=True memoizes the normalized adjacency (transductive setting).
        self.conv = GCNConv(in_channels, hidden_channels, cached=True)
        self.prelu = nn.PReLU(hidden_channels)

    def forward(self, x, edge_index):
        # Propagate features over the graph, then apply the learned PReLU.
        return self.prelu(self.conv(x, edge_index))
def corruption(x, edge_index):
    """Produce a corrupted graph view: shuffle node features across nodes.

    The edge structure is left untouched; only the feature rows are permuted,
    which is the negative-sample construction used by Deep Graph Infomax.
    """
    shuffled_rows = torch.randperm(x.size(0))
    return x[shuffled_rows], edge_index
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Deep Graph Infomax: maximizes mutual information between patch (node)
# embeddings and a global graph summary, using `corruption` for negatives.
model = DeepGraphInfomax(
    hidden_channels=512, encoder=Encoder(dataset.num_features, 512),
    # Readout: sigmoid of the mean node embedding (extra args ignored).
    summary=lambda z, *args, **kwargs: torch.sigmoid(z.mean(dim=0)),
    corruption=corruption).to(device)
data = dataset[0].to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
def train():
    """Run a single unsupervised optimization step; return the scalar loss."""
    model.train()
    optimizer.zero_grad()
    pos_emb, neg_emb, graph_summary = model(data.x, data.edge_index)
    dgi_loss = model.loss(pos_emb, neg_emb, graph_summary)
    dgi_loss.backward()
    optimizer.step()
    return dgi_loss.item()
def test():
    """Probe embedding quality with the built-in logistic-regression test."""
    model.eval()
    embeddings, _, _ = model(data.x, data.edge_index)
    return model.test(
        embeddings[data.train_mask], data.y[data.train_mask],
        embeddings[data.test_mask], data.y[data.test_mask], max_iter=150)
# Train for 300 epochs, then evaluate the frozen embeddings once at the end.
for epoch in range(1, 301):
    loss = train()
    print('Epoch: {:03d}, Loss: {:.4f}'.format(epoch, loss))
acc = test()
print('Accuracy: {:.4f}'.format(acc))
| [
"matthias.fey@tu-dortmund.de"
] | matthias.fey@tu-dortmund.de |
24696d3d7d1ec6758135a501519de7bf80fc9c3f | 1208ac3718420c4a118ab6b777d99980b85f952a | /123.py | 5f73d229ebcbe9954cec2122d838cef49c4cf56b | [] | no_license | deimelperez/150_Py_challenges | 6ab9aea77c9c117b682790bfe36fb5e280cb8afc | b58f55312e7abf30cb7cb6d68b249bb5dcd3c862 | refs/heads/master | 2023-03-13T02:30:15.095467 | 2021-03-04T19:02:11 | 2021-03-04T19:02:11 | 344,579,979 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,490 | py | import os
import csv
# Clear the console (Windows `cls`; a POSIX shell would need 'clear' instead).
clear = lambda: os.system('cls')
def prompt():
    """Display the main menu and return a validated choice (1-4).

    Re-prompts until the user enters one of the menu numbers.  Non-numeric
    input no longer crashes the program (previously int() raised ValueError).
    """
    ch = 0
    while ch not in (1, 2, 3, 4):
        clear()
        print('1- Add to file')
        print('2- View all records')
        print('3- Delete record')
        print('4- Exit')
        try:
            ch = int(input('Select an option: '))
        except ValueError:
            ch = 0  # invalid text entered -> show the menu again
    clear()
    return ch
def add_to_file():
    """Append one `name,salary` record to the CSV file.

    Prompts first, then opens the file with a context manager so the handle
    is always closed, even if an error occurs mid-write.
    NOTE(review): a name containing a comma would corrupt the row; switching
    to csv.writer would quote it, but that changes the on-disk format.
    """
    name = input('Enter name: ')
    salary = input('Enter salary: ')
    with open('122 Salaries.csv', 'a') as file:
        file.write(name + ',' + salary + '\n')
def view_records():
    """Print every row of the CSV file, then wait for the user.

    Uses a context manager so the file handle is released even on error.
    Rows keep their trailing newline, so print() double-spaces the listing,
    matching the original output exactly.
    """
    with open('122 Salaries.csv', 'r') as file:
        for row in file:
            print(row)
    input("\nPress enter to continue")
def delete_record():
    """List all rows with an index, then delete the row the user picks.

    Reads the whole file into memory, removes the chosen row, and rewrites
    the file.  Rows are re-joined generically with ','.join, so records with
    any number of columns survive the rewrite (the original assumed exactly
    two columns and would have dropped or crashed on anything else).
    """
    with open('122 Salaries.csv') as fh:
        rows = list(csv.reader(fh))
    for index, row in enumerate(rows):
        print(index, row)
    row_number = int(input('Select which row you want to delete: '))
    del rows[row_number]
    with open('122 Salaries.csv', 'w') as fh:
        for row in rows:
            fh.write(','.join(row) + '\n')
def main():
    """Menu loop: dispatch on the user's choice until 4 (Exit) is picked."""
    actions = {1: add_to_file, 2: view_records, 3: delete_record}
    choice = 0
    while choice != 4:
        choice = prompt()
        # prompt() only ever returns 1-4; 4 has no action and ends the loop.
        action = actions.get(choice)
        if action is not None:
            action()
    input("\nPress enter to continue")
    return
# Program entry point: start the interactive menu loop.
main()
| [
"deimelperez@gmail.com"
] | deimelperez@gmail.com |
cff764949b2ed11e5a93eb1010ee840f4c990c13 | f7d3c8483521ec45bf0bb0927c0c57a275e03996 | /ch04-linear/linear_ml.py | fe394af134bee53da6a57b9be7d233d6d95f245d | [] | no_license | buzzzzx/DataScienceLearning | 2fe7fef6fb8538e2acd46d19643ff4fc50dc249a | af38157f01ba3682141b11788276daf6d6002b37 | refs/heads/master | 2020-03-23T16:40:21.517239 | 2018-07-24T15:10:17 | 2018-07-24T15:10:17 | 141,699,329 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,808 | py | # -*- coding: utf-8 -*-
__author__ = 'buzz'
__date__ = '2018/7/16 下午2:42'
"""
1. spit the data: trainData, testData
2. train the model
3. evaluate the model, get the MSE and COD
4. visualization
"""
import os
import sys
from sklearn import linear_model
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def linearModel(data):
    """End-to-end demo: split `data`, fit OLS on x -> y, score it, and plot.

    The first 15 rows form the training set and the remainder the test set
    (a fixed, ordered split -- adequate only for this toy example).
    """
    features = ["x"]
    labels = ["y"]
    trainData = data[:15]
    testData = data[15:]
    model = trainModel(trainData, features, labels)
    error, score = evaluateModel(model, testData, features, labels)
    visualizeModel(model, data, features, labels, error, score)
def trainModel(trainData, features, labels):
    """Fit and return an ordinary least-squares linear regression model."""
    regressor = linear_model.LinearRegression()
    regressor.fit(trainData[features], trainData[labels])
    return regressor
def evaluateModel(model, testData, features, labels):
    """Return (mean squared error, R^2 score) of `model` on the test split."""
    predictions = model.predict(testData[features])
    error = np.mean((predictions - testData[labels]) ** 2)
    score = model.score(testData[features], testData[labels])
    return error, score
def visualizeModel(model, data, features, labels, error, score):
    """Plot the raw points, the fitted line, and the evaluation metrics.

    Axis labels and legend text are Chinese; SimHei is configured so
    matplotlib can render them.  Both Python 2 and Python 3 are supported
    via the version checks below (str.decode only exists on Python 2).
    """
    plt.rcParams['font.sans-serif'] = ['SimHei']
    fig = plt.figure(figsize=(6, 6), dpi=80)
    ax = fig.add_subplot(111)
    ax.set_title("线性回归示例")
    ax.set_xlabel('$x$')
    ax.set_ylabel('$y$')
    ax.scatter(data[features], data[labels], color='b', label=u'%s: $y = x + \epsilon$' % "真实值")
    if model.intercept_ > 0:
        # Draw the model as a red line; use the "+ intercept" label form.
        # On Python 3, str does not need decode
        if sys.version_info[0] == 3:
            ax.plot(data[features], model.predict(data[features]), color='r',
                label=u'%s: $y = %.3fx$ + %.3f' \
                % ("预测值", model.coef_, model.intercept_))
        else:
            ax.plot(data[features], model.predict(data[features]), color='r',
                label=u'%s: $y = %.3fx$ + %.3f' \
                % ("预测值".decode("utf-8"), model.coef_, model.intercept_))
        ## coef: slope coefficient, intercept: intercept term
    else:
        # Negative intercept: use the "- abs(intercept)" label form instead.
        # On Python 3, str does not need decode
        if sys.version_info[0] == 3:
            ax.plot(data[features], model.predict(data[features]), color='r',
                label=u'%s: $y = %.3fx$ - %.3f' \
                % ("预测值", model.coef_, abs(model.intercept_)))
        else:
            ax.plot(data[features], model.predict(data[features]), color='r',
                label=u'%s: $y = %.3fx$ - %.3f' \
                % ("预测值".decode("utf-8"), model.coef_, abs(model.intercept_)))
    legend = plt.legend(shadow=True)
    legend.get_frame().set_facecolor('#6F93AE')
    # Show the mean squared error and the coefficient of determination.
    # On Python 3, str does not need decode
    if sys.version_info[0] == 3:
        ax.text(0.99, 0.01,
            u'%s%.3f\n%s%.3f' \
            % ("均方差:", error, "决定系数:", score),
            style='italic', verticalalignment='bottom', horizontalalignment='right',
            transform=ax.transAxes, color='m', fontsize=13)
    else:
        ax.text(0.99, 0.01,
            u'%s%.3f\n%s%.3f' \
            % ("均方差:".decode("utf-8"), error, "决定系数:".decode("utf-8"), score),
            style='italic', verticalalignment='bottom', horizontalalignment='right',
            transform=ax.transAxes, color='m', fontsize=13)
    # Show the figure; plt.show() blocks until every figure window is closed.
    # In an interactive shell, passing block=False disables the blocking.
    plt.show()
if __name__ == '__main__':
    # Load the toy dataset and run the full train/evaluate/plot pipeline.
    filepath = 'data/simple_example.csv'
    data = pd.read_csv(filepath)
    linearModel(data)
    # pandas quick reference (column selection):
    # data["x"] data[["x", "y"]]
    # pandas quick reference (row selection):
    # data[:10]
| [
"buzzzzx233@gmail.com"
] | buzzzzx233@gmail.com |
ccb6cff749499176fa4d9de1366c42f43483fafb | 0add7953d3e3ce2df9e8265102be39b758579753 | /built-in/TensorFlow/Official/cv/image_segmentation/UNet_Industrial_for_TensorFlow/model/layers/__init__.py | a7816ce0045ac92926203a79ec08c91e0727c967 | [
"Apache-2.0"
] | permissive | Huawei-Ascend/modelzoo | ae161c0b4e581f8b62c77251e9204d958c4cf6c4 | df51ed9c1d6dbde1deef63f2a037a369f8554406 | refs/heads/master | 2023-04-08T08:17:40.058206 | 2020-12-07T08:04:57 | 2020-12-07T08:04:57 | 319,219,518 | 1 | 1 | Apache-2.0 | 2023-03-24T22:22:00 | 2020-12-07T06:01:32 | Python | UTF-8 | Python | false | false | 2,506 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
from model.layers.utils import _log_hparams
from model.layers.activation import crelu
from model.layers.activation import elu
from model.layers.activation import leaky_relu
from model.layers.activation import prelu
from model.layers.activation import relu
from model.layers.activation import relu6
from model.layers.activation import selu
from model.layers.activation import sigmoid
from model.layers.activation import softmax
from model.layers.activation import tanh
from model.layers.conv2d import conv2d
from model.layers.deconv2d import deconv2d
from model.layers.dense import dense
from model.layers.drop_layers import dropout
from model.layers.math_ops import reduce_mean
from model.layers.normalization import batch_norm
from model.layers.padding import pad
from model.layers.pooling import average_pooling2d
from model.layers.pooling import max_pooling2d
from model.layers.array_ops import concat
from model.layers.array_ops import flatten
from model.layers.array_ops import reshape
from model.layers.array_ops import squeeze
from model.layers.array_ops import upscale_2d
__all__ = [
# activation layers
'crelu',
'elu',
'leaky_relu',
'prelu',
'relu',
'relu6',
'selu',
'sigmoid',
'softmax',
'tanh',
# array ops
'concat',
'flatten',
'reshape',
'squeeze',
'upscale_2d',
# conv layers
'conv2d',
# deconv layers
'deconv2d',
# dense layers
'dense',
# drop layers
'dropout',
# math_ops layers
'reduce_mean',
# normalization layers
'batch_norm',
# padding layers
'pad',
# pooling layers
'average_pooling2d',
'max_pooling2d',
]
| [
"1571856591@qq.com"
] | 1571856591@qq.com |
db27d9c00b53a47982cfeea67dd63ecb1da8129b | b9cda298b1e8da3a657aea29080a467055bae421 | /scandium/tpl/project_template/setup.pyt | cc777de4496e074ae1f3fefcdbd641970330004f | [] | no_license | vasfili/scandium | 9fa98c18100b18f8dac60955e5602ca038e681db | 843757d13a70a407626a0a7d5f6407a21d74e5f9 | refs/heads/master | 2020-12-13T22:34:50.661608 | 2015-10-14T13:14:27 | 2015-10-14T13:14:27 | 44,236,746 | 0 | 0 | null | 2015-10-14T09:11:37 | 2015-10-14T09:11:36 | Python | UTF-8 | Python | false | false | 5,219 | pyt | from setuptools import setup, find_packages
from py2exe.build_exe import py2exe as build_exe
from distutils.sysconfig import get_python_lib
import fnmatch
import py2exe
import sys
import os
# If run without args, build executables, in quiet mode.
if len(sys.argv) == 1:
sys.argv.append("py2exe")
sys.argv.append("-q")
################################################################
# Customize these variables
NAME = "{{project_name}}"
VERSION = "{{version}}"
DESCRIPTION = "{{description}}"
COMPANY_NAME = "{{company_name}}"
LICENSE = "{{license}}"
# Fiddle with these variables if you use Python modules that
# py2exe can't find, or you change the location of static
# and template data.
INCLUDES = ['jinja2.ext', 'PySide.QtNetwork']
EXCLUDES = ["Tkconstants", "Tkinter", "tcl"]
PACKAGES = find_packages(exclude=("tests",))
PACKAGE_DATA_DIRS = ('static', 'templates')
################################################################
# A program using PySide
# The manifest will be inserted as resource into {{project_name}}.exe. This
# gives the controls the Windows XP appearance (if run on XP ;-) and
# ensures the Visual C++ Redistributable Package DLLs get found.
#
# Another option would be to store it in a file named
# {{project_name}}.exe.manifest, and copy it with the data_files option into
# the dist-dir.
#
manifest_template = '''
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity
version="5.0.0.0"
processorArchitecture="x86"
name="{{project_name}}"
type="win32"
/>
<description>{{project_name}} Program</description>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.Windows.Common-Controls"
version="6.0.0.0"
processorArchitecture="X86"
publicKeyToken="6595b64144ccf1df"
language="*"
/>
</dependentAssembly>
</dependency>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.VC90.CRT"
version="9.0.21022.8"
processorArchitecture="X86"
publicKeyToken="1fc8b3b9a1e18e3b"
language="*"
/>
</dependentAssembly>
</dependency>
</assembly>
'''
RT_MANIFEST = 24
# Extention to embed package_data in py2exe's distributable
# See: http://crazedmonkey.com/blog/python/pkg_resources-with-py2exe.html
class MediaCollector(build_exe):
def copy_extensions(self, extensions):
build_exe.copy_extensions(self, extensions)
def collect_media(path):
for root, _, filenames in os.walk(path):
for fname in fnmatch.filter(filenames, '*'):
parent = os.path.join(self.collect_dir, root)
if not os.path.exists(parent):
self.mkpath(parent)
self.copy_file(os.path.join(root, fname), \
os.path.join(parent, fname))
self.compiled_files.append(os.path.join(root, fname))
for dname in PACKAGE_DATA_DIRS:
collect_media(os.path.join(NAME, dname))
collect_media(os.path.join(NAME, dname))
# Create Windows Application target
#
class Target:
def __init__(self, **kw):
self.__dict__.update(kw)
# for the versioninfo resources
self.version = VERSION
self.company_name = COMPANY_NAME
self.description = DESCRIPTION
self.copyright = LICENSE
self.name = NAME
app = Target(
# what to build
script = "runapp.py",
other_resources = [(RT_MANIFEST, 1, manifest_template % dict(prog=NAME))],
icon_resources = [(1, "%s/static/icons/icon.ico" % NAME)],
dest_base = NAME
)
# Qt4 uses plugins for image processing. These cannot be bundled into the
# executable, so we copy them into the application directory, along with
# the Qt DLL files, which we then exclude from the bundle.
path = os.path.join(get_python_lib(), 'PySide', 'plugins', 'imageformats')
imageformats = []
for dll in os.listdir(path):
imageformats.append(os.path.join(path, dll))
path = os.path.join(get_python_lib(), 'PySide')
qt = []
for dll in ("QtCore4.dll", "QtGui4.dll", "QtNetwork4.dll"):
qt.append(os.path.join(path, dll))
DATA_FILES = [('imageformats', imageformats), ('', qt)]
################################################################
setup(
cmdclass = {'py2exe': MediaCollector},
data_files = DATA_FILES,
include_package_data=True,
options = {"py2exe": {"compressed": 1,
"optimize": 1,
"ascii": 0,
"bundle_files": 1,
"packages": PACKAGES,
"includes": INCLUDES,
"excludes": EXCLUDES,
# exclude the Qt4 DLLs to ensure the data_files version gets used, otherwise image processing will fail
"dll_excludes": ['msvcp90.dll', 'w9xpopen.exe', "QtCore4.dll", "QtGui4.dll", "QtNetwork4.dll"]}},
zipfile = None,
windows = [app],
) | [
"matt@bennett.name"
] | matt@bennett.name |
8449cd14afa4652b75eadf140e87adf6909ad3d1 | 1539f86f91ce0ee6150fba7363976d32cd37ece2 | /codes_auto/99.recover-binary-search-tree.py | 71f632672d901d70aa7038b3688b89a5cf53aea0 | [] | no_license | zhpbo/LeetCode_By_Python | fdee0a8b7ea7ed1f61a99f0041e1c748e50f138c | 0017b9db891d36789116f7299d32510a373e68da | refs/heads/master | 2023-07-09T15:38:45.003002 | 2020-08-18T07:04:51 | 2020-08-18T07:04:51 | 281,598,190 | 0 | 0 | null | 2021-08-18T04:58:39 | 2020-07-22T06:47:05 | null | UTF-8 | Python | false | false | 995 | py | #
# @lc app=leetcode.cn id=99 lang=python3
#
# [99] recover-binary-search-tree
#
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def recoverTree(self, root: "TreeNode") -> None:
        """
        Do not return anything, modify root in-place instead.

        Exactly two nodes of the BST were swapped.  Restore the tree by
        collecting all values with an inorder walk, sorting them, and
        writing the sorted values back during a second inorder walk.
        O(n log n) time, O(n) extra space.

        Fixes vs. the original: the `del tree[0]` pop-from-front made the
        rewrite pass O(n^2); an iterator makes it O(n).  The single helper
        with a string `flag` is split into two clearly-named helpers, and
        leftover debug prints were removed.  (The annotation is a string
        forward reference because TreeNode is supplied by the judge.)
        """
        values = []

        def collect(node):
            # Inorder traversal: left subtree, node, right subtree.
            if not node:
                return
            collect(node.left)
            values.append(node.val)
            collect(node.right)

        collect(root)
        values.sort()
        sorted_values = iter(values)

        def rewrite(node):
            # Overwrite values in inorder position with the sorted sequence.
            if not node:
                return
            rewrite(node.left)
            node.val = next(sorted_values)
            rewrite(node.right)

        rewrite(root)
# @lc code=end | [
"liuyang0001@outlook.com"
] | liuyang0001@outlook.com |
6ece9e8b26aba619307519cdbbc359223e72c41a | 57d5ebeece91f5759d54e898154f11e97c6e5609 | /tests/add_trailing_comma_test.py | ee7bed3b6df646ee1055c45a784166a530c78b5b | [
"MIT"
] | permissive | chriskuehl/add-trailing-comma | 0c50e16fd6d25057d025f75a23ddde0aafec4dbd | d26f8ca449eb12cfaec3d3cd1f8ced789bd73e9a | refs/heads/master | 2020-12-02T07:46:50.317774 | 2017-07-10T01:29:14 | 2017-07-10T01:29:14 | 96,725,169 | 0 | 0 | null | 2017-07-10T01:53:11 | 2017-07-10T01:53:11 | null | UTF-8 | Python | false | false | 3,624 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
import ast
import sys
import pytest
from add_trailing_comma import _fix_calls
from add_trailing_comma import main
@pytest.mark.parametrize(
    'src',
    (
        # No relevant multiline calls
        'x = 5',
        'x(1)',
        # Don't rewrite functions that have a single generator argument as
        # this breaks lib2to3 based tools.
        'tuple(\n'
        '    a for a in b\n'
        ')',
        # Don't rewrite *args or **kwargs unless --py35-plus
        'x(\n'
        '    *args\n'
        ')',
        'x(\n'
        '    **kwargs\n'
        ')',
        # The ast tells us that the inner call starts on line 2, but the first
        # paren (and last paren) are actually both on line 3.
        'x(\n'
        '    "foo"\n'
        '    "bar".format(1),\n'
        ')',
        # Don't add a comma when it's not at the end of a line
        'x((\n'
        '    1,\n'
        '))',
    ),
)
def test_fix_calls_noops(src):
    # Every snippet above must come back byte-identical (no comma added).
    ret = _fix_calls(src, py35_plus=False)
    assert ret == src
def _has_16806_bug():
# See https://bugs.python.org/issue16806
return ast.parse('"""\n"""').body[0].value.col_offset == -1
# Expected to fail on interpreters where the col_offset bug is absent.
@pytest.mark.xfail(not _has_16806_bug(), reason='multiline string parse bug')
def test_ignores_invalid_ast_node():
    src = (
        'x(\n'
        '    """\n'
        '    """\n'
        ')'
    )
    assert _fix_calls(src, py35_plus=False) == src
def test_py35_plus_rewrite():
    # With --py35-plus, a trailing comma IS added after *args.
    src = (
        'x(\n'
        '    *args\n'
        ')'
    )
    ret = _fix_calls(src, py35_plus=True)
    assert ret == (
        'x(\n'
        '    *args,\n'
        ')'
    )
@pytest.mark.xfail(sys.version_info < (3, 5), reason='py35+ only feature')
@pytest.mark.parametrize(
    'syntax',
    (
        # Multiple star-args / double-star-kwargs only parse on py35+,
        # so their presence proves the file targets py35+.
        'y(*args1, *args2)\n',
        'y(**kwargs1, **kwargs2)\n',
    ),
)
def test_auto_detected_py35_plus_rewrite(syntax):
    src = syntax + 'x(\n    *args\n)'
    expected = syntax + 'x(\n    *args,\n)'
    assert _fix_calls(src, py35_plus=False) == expected
def test_main_trivial():
    # No file arguments: nothing to rewrite, exit code 0.
    assert main(()) == 0
def test_main_noop(tmpdir):
    # A file that needs no changes: exit 0 and contents untouched.
    f = tmpdir.join('f.py')
    f.write('x = 5\n')
    assert main((f.strpath,)) == 0
    assert f.read() == 'x = 5\n'
def test_main_changes_a_file(tmpdir, capsys):
    # A rewrite happened: exit 1, the rewrite is announced, comma added.
    f = tmpdir.join('f.py')
    f.write('x(\n    1\n)\n')
    assert main((f.strpath,)) == 1
    out, _ = capsys.readouterr()
    assert out == 'Rewriting {}\n'.format(f.strpath)
    assert f.read() == 'x(\n    1,\n)\n'
def test_main_syntax_error(tmpdir):
    # py2-only syntax cannot be parsed; the file is skipped, exit 0.
    f = tmpdir.join('f.py')
    f.write('from __future__ import print_function\nprint 1\n')
    assert main((f.strpath,)) == 0
def test_main_non_utf8_bytes(tmpdir, capsys):
    # Non-UTF-8 source is reported as unsupported; exit 1, file untouched.
    f = tmpdir.join('f.py')
    f.write_binary('# -*- coding: cp1252 -*-\nx = €\n'.encode('cp1252'))
    assert main((f.strpath,)) == 1
    out, _ = capsys.readouterr()
    assert out == '{} is non-utf-8 (not supported)\n'.format(f.strpath)
def test_main_py35_plus_argument_star_args(tmpdir):
    # *args only gains a trailing comma when --py35-plus is passed.
    f = tmpdir.join('f.py')
    f.write('x(\n    *args\n)\n')
    assert main((f.strpath,)) == 0
    assert f.read() == 'x(\n    *args\n)\n'
    assert main((f.strpath, '--py35-plus')) == 1
    assert f.read() == 'x(\n    *args,\n)\n'
def test_main_py35_plus_argument_star_star_kwargs(tmpdir):
    # Same as above, but for **kwargs.
    f = tmpdir.join('f.py')
    f.write('x(\n    **args\n)\n')
    assert main((f.strpath,)) == 0
    assert f.read() == 'x(\n    **args\n)\n'
    assert main((f.strpath, '--py35-plus')) == 1
    assert f.read() == 'x(\n    **args,\n)\n'
| [
"asottile@umich.edu"
] | asottile@umich.edu |
607b14e2c65395162c1e43a9e0046c08f05de656 | 7465148de5d656ebfe68b588a2f271a11384ed6a | /examples/multiple_actions_docker/second.py | e1f78138fe14078b2864bf7a2b3a58b404a44222 | [] | no_license | fiefdx/LitePipeline | 1462dacdd1a0f2c67972b6014b428c2c45d46949 | 09608f8c5f248d2ba10e5840bf00d69e76ed6291 | refs/heads/master | 2023-04-14T11:45:18.929249 | 2023-04-02T06:48:30 | 2023-04-02T06:48:30 | 226,355,739 | 2 | 0 | null | 2023-04-01T17:49:14 | 2019-12-06T15:17:33 | Python | UTF-8 | Python | false | false | 1,206 | py | # -*- coding: utf-8 -*-
import os
import sys
import time
import json
import logging
import datetime
from pathlib import Path
import tornado
from litepipeline_helper.models.action import Action
import logger
# Module-level logger for this pipeline action.
LOG = logging.getLogger(__name__)
# User's home directory; not referenced below in this script.
home = str(Path.home())
if __name__ == "__main__":
    # Pipeline contract: read (workspace, input_data) from the framework.
    workspace, input_data = Action.get_input()
    # Per-run log directory lives inside the action's workspace.
    logs_directory = os.path.join(workspace, "logs")
    logger.config_logging(file_name = "second.log",
                          log_level = "DEBUG",
                          dir_name = logs_directory,
                          day_rotate = False,
                          when = "D",
                          interval = 1,
                          max_size = 20,
                          backup_count = 5,
                          console = False)
    LOG.debug("test start")
    LOG.debug("input_data: %s", input_data)
    data = {"messages": []}
    # Emit one timestamped message per second for ten seconds (demo workload).
    for i in range(10, 20):
        now = datetime.datetime.now()
        message = "%s: hello world, tornado(%03d): %s" % (now, i, tornado.version)
        data["messages"].append(message)
        LOG.debug(message)
        time.sleep(1)
    # Hand the collected messages back to the pipeline framework.
    Action.set_output(data = data)
    LOG.debug("test end")
| [
"fiefdx@163.com"
] | fiefdx@163.com |
51a5adfbaade61004be3dca483ae4850f82444ba | a2b20597759990445081057d35d113434cfcf970 | /stubs/integration_test/fixture_stubs/django/db/__init__.pyi | 5319b5a66ff2a2a4b828e875de269690e72683c4 | [
"MIT"
] | permissive | facebook/pyre-check | 34059599c02b65605c574f13555229f3b931fd4e | fe8ccedc572cc1faa1fd01e9138f65e982875002 | refs/heads/main | 2023-09-03T19:10:11.587028 | 2023-09-02T07:40:35 | 2023-09-02T07:40:35 | 110,274,488 | 6,703 | 575 | MIT | 2023-09-13T17:02:32 | 2017-11-10T17:31:36 | OCaml | UTF-8 | Python | false | false | 843 | pyi | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
from typing import Any
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.utils import (
ConnectionHandler,
DatabaseError as DatabaseError,
DataError as DataError,
Error as Error,
IntegrityError as IntegrityError,
InterfaceError as InterfaceError,
InternalError as InternalError,
NotSupportedError as NotSupportedError,
OperationalError as OperationalError,
ProgrammingError as ProgrammingError,
)
# Stub signatures for django.db's connection-management helpers.
def close_old_connections(**kwargs: Any) -> None: ...
def reset_queries(**kwargs: Any) -> None: ...
# Module-level singletons exposed by django.db.
transaction: Any
connections: ConnectionHandler
connection: BaseDatabaseWrapper
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
3e0591086d651f921210267a7a24e4842272772c | 62fc811f203f041c07d4bc782ce5f7f5cb8dd7c6 | /test.py | 01b7128de9357da4fab8a70928f00beee19546bf | [] | no_license | riaz/Recee | 71dba563383059bac474bf361f216adfdebab8ae | a68c356a5c77ef0365f45c557d945d50fadcb430 | refs/heads/master | 2021-01-10T05:07:40.018566 | 2015-11-16T04:46:31 | 2015-11-16T04:46:31 | 46,204,411 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 589 | py | from openalpr import Alpr
import sys
# Initialise the recognizer for EU-style plates with local config/runtime data.
alpr = Alpr("eu", "nplate_train/openalpr.conf.in", "nplate_train/runtime_data")
if not alpr.is_loaded():
    print("Error loading OpenALPR")
    sys.exit(1)
#alpr.set_top_n(20)
alpr.set_default_region("eu")
# Run plate recognition on a single hard-coded dashcam frame.
results = alpr.recognize_file("/home/riaz/Desktop/hack/2009_09_08_drive_0010/I1_000388.png")
for plate in results['results']:
    if len(plate['candidates']) > 0:
        # Print the best candidate only.  NOTE(review): Python 2 print
        # statement -- this script is not Python 3 compatible as written.
        print "Found: %12s %12f" % ( plate['candidates'][0]['plate'],plate['candidates'][0]['confidence'])
# Call when completely done to release memory
alpr.unload()
| [
"riaz.2012@gmail.com"
] | riaz.2012@gmail.com |
a2c55b36e5abd15a11aed5da04519c8e52823407 | 17be0e9275082c3239fedc11bc617ecd5856136c | /letor/offline/train_one_state.py | ee6031ab06d7a984a088428fc91a8abe491fd882 | [] | no_license | mdkmongo/semantichealth.github.io | 8bb814bfd3b0b3a71828625a2acebfd8013e2eef | 6462ba2cc406967b0371b09822e4c26860e96c91 | refs/heads/master | 2021-01-21T08:24:07.128484 | 2016-08-19T05:35:04 | 2016-08-19T05:35:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,589 | py | from s3_helpers import *
from get_rank_for_state_plan import *
from query_characterizer import *
import pickle
def train_one_state(click_data, state, log, s3_fea):
    '''Train the learning-to-rank model for one state and publish artifacts to S3.

    Filters `click_data` to `state`, clusters its queries, runs LETOR
    training, and uploads two pickles: a training artifact
    (plans + rank matrix) and an online runtime artifact
    (vocabulary + cluster centroids).

    :param click_data: table-like rows with 'state', 'query', 'ranks', 'clicks'
                       -- presumably a pandas/recarray structure; verify caller
    :param state: state code used to filter rows and to name the S3 keys
    :param log: logger exposing trace()/warning()
    :param s3_fea: feature source forwarded to get_rank_for_state_plan
    '''
    # set folder name of S3
    s3clnt = s3_helper()
    log.trace('characterize queries for state %s' %state)
    s_rows = click_data[click_data['state']==state]
    q_cluster, vocab, centroids = query_characterizer(s_rows['query'], log)
    log.trace('run letor training for state %s' %state)
    # NOTE: np comes from a star import above -- TODO confirm it is numpy.
    letor_rank, plans = get_rank_for_state_plan(q_cluster, np.array([[r['ranks'],r['clicks']] for r in s_rows]), log, s3_fea)
    if not plans: # or (not letor_rank):
        log.warning('no feature file found for state %s, skip training.' %state)
        return
    # exclude missing plan IDs in ES
    # NOTE(review): text-mode open for pickle implies Python 2 -- confirm.
    with open('missing.pickle') as f:
        missing = pickle.load(f)
    # Boolean mask selecting only plans that still exist in ES.
    picker = np.array([p not in missing for p in plans])
    # upload the stuff to S3
    save_training = 'training/%s_%d.pickle' %(state, len(letor_rank))
    with open(save_training, 'w') as f:
        pickle.dump([list(np.array(plans)[picker]), letor_rank[:, picker]], f)
    # Replace any previous artifact for this state before uploading.
    s3clnt.delete_by_state('training/%s' %(state))
    s3clnt.upload(save_training)
    save_online = 'online/%s_runtime.pickle' %(state)
    cen = [list(c) for c in centroids]
    # Invert the vocab mapping (token -> index) into a list (index -> token).
    voc = [None]*len(vocab)
    for k,v in vocab.items():
        voc[v] = k
    with open(save_online, 'w') as f:
        pickle.dump([voc, cen], f)
    s3clnt.delete_by_state('online/%s' %(state))
    s3clnt.upload(save_online)
    log.trace('ranking & online file are saved on s3')
| [
"ynglei@gmail.com"
] | ynglei@gmail.com |
8df1690d1f23f89363ab4c98e63ee1b3d812a469 | 505dc9404c89e56aea70f2db9fc1b3fb311fc5d9 | /usr/lib/enigma2/python/Components/Renderer/speedyflipclockfortuna_metall1.py | ec87abf72fe3c8bbce8261dc3e619d1ce0ca2573 | [] | no_license | e2plugins/4ATV_speedy_blue | ae8181ed4017beb4b48e58fe7cbbcbe2a1696057 | c84da50a0d872a2e74812214eed5532ed0893534 | refs/heads/master | 2022-11-14T17:09:41.134795 | 2020-07-12T06:24:24 | 2020-07-12T06:24:24 | 277,350,143 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,136 | py | # FlipClock
# Copyright (c) .:TBX:. 2016
# Mod by Maggy
# Th4ATV_2_2_speedy_black_mod program 4ATV_2_2_speedy_black_mod free software: you can red4ATV_2_2_speedy_black_modtribute it and/or modify
# it under the terms of the GNU General Public License as publ4ATV_2_2_speedy_black_modhed by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Th4ATV_2_2_speedy_black_mod program 4ATV_2_2_speedy_black_mod d4ATV_2_2_speedy_black_modtributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with th4ATV_2_2_speedy_black_mod program. If not, see <http://www.gnu.org/licenses/>.
#
from Components.Renderer.Renderer import Renderer
from enigma import ePixmap, eTimer, eDVBVolumecontrol
from Components.config import config
class speedyflipclock_metall1(Renderer):
    """Renders one flip-clock digit as a pixmap, refreshed 5x per second.

    The converter's text carries a marker (H1/H2/M1/M2/S1/S2) naming which
    digit this instance shows; the digit itself sits at a fixed slice of
    that text.
    """

    # Marker -> slice of the source text holding the digit character.
    # Order matters: the first marker found in the text wins.
    _DIGIT_SLICES = (
        ('H1', slice(3, 4)),
        ('H2', slice(4, 5)),
        ('M1', slice(3, 4)),
        ('M2', slice(4, 5)),
        ('S1', slice(3, 4)),
        ('S2', slice(4, 5)),
    )

    def __init__(self):
        Renderer.__init__(self)
        self.timer = eTimer()
        self.timer.callback.append(self.pollme)

    GUI_WIDGET = ePixmap

    def changed(self, what):
        if self.suspended:
            return
        text = self.source.text
        digit = 0  # fallback when no marker is present -> shows '0.png'
        for marker, digit_slice in self._DIGIT_SLICES:
            if marker in text:
                digit = text[digit_slice]
                break
        self.instance.setPixmapFromFile('/usr/share/enigma2/4ATV_2_2_speedy_black_fortuna/flipclock/flipclock_metall1/' + str(digit) + '.png')

    def pollme(self):
        self.changed(None)
        return

    def onShow(self):
        self.suspended = False
        self.timer.start(200)

    def onHide(self):
        self.suspended = True
        self.timer.stop()
| [
"captain.onboard@web.de"
] | captain.onboard@web.de |
77028e65d46a2e748e17451d5f7ea8d70505ece8 | afdda41e01518db1a2685e9eb7fad524d7b5c69b | /ABC161/D/test.py | a064933ef448b042dd8bc488c08b80b5cdfacac1 | [] | no_license | kame3niku9/atcoder | 4bea5598b6529b7dd5d84a4b342b7ef650b81141 | b5042f31d43425e4ca1e02cc4bbfecbd5a738b49 | refs/heads/master | 2022-07-10T11:37:47.560392 | 2020-11-22T13:47:08 | 2020-11-22T13:47:08 | 233,927,925 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 961 | py | from main import resolve
import sys
from io import StringIO
import unittest
class TestClass(unittest.TestCase):
    """Sample tests for AtCoder ABC161 D: feed stdin, compare stdout."""

    def assertIO(self, input, output):
        # Temporarily swap stdin/stdout so resolve() reads `input` and its
        # printed result can be captured and compared against `output`.
        stdout, stdin = sys.stdout, sys.stdin
        sys.stdout, sys.stdin = StringIO(), StringIO(input)
        resolve()
        sys.stdout.seek(0)
        out = sys.stdout.read()[:-1]  # drop the trailing newline
        sys.stdout, sys.stdin = stdout, stdin
        self.assertEqual(out, output)

    def test_入力例_1(self):
        input = """15"""
        output = """23"""
        self.assertIO(input, output)

    def test_入力例_2(self):
        input = """1"""
        output = """1"""
        self.assertIO(input, output)

    def test_入力例_3(self):
        input = """13"""
        output = """21"""
        self.assertIO(input, output)

    def test_入力例_4(self):
        input = """100000"""
        output = """3234566667"""
        self.assertIO(input, output)
if __name__ == "__main__":
unittest.main() | [
"kame3niku9@gmail.com"
] | kame3niku9@gmail.com |
fb3292faa83df637e9541d37e4a20e7c4c8eaabc | 3562a01673bc62df91fdff621e48b82b15cb330c | /Part 1 - Data Preprocessing/Section 2 -------------------- Part 1 - Data Preprocessing --------------------/data_preprocess.py | 4a7456da3e023a18c6b161a2a1cd77ee4e089c56 | [] | no_license | laksh10-stan/Machine-Learning-A-Z | 16bf070a6ddbde812b053b84d9f09186cf9a0257 | ba2ac016879dc5ea4be4d670e7a8de5e24abbae2 | refs/heads/master | 2021-02-08T20:46:36.892343 | 2020-03-01T17:54:16 | 2020-03-01T17:54:16 | 244,195,310 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,303 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 23 00:25:37 2019
@author: laksh
"""
#importing libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
#import dataset
dataset = pd.read_csv('Data.csv')
X = dataset.iloc[:, :-1].values  # features: every column except the last
y = dataset.iloc[:, 3].values    # target: the 4th column

#taking care of the missing data
# FIX: sklearn.preprocessing.Imputer was removed in scikit-learn 0.22;
# SimpleImputer is the supported drop-in replacement (same mean strategy).
from sklearn.impute import SimpleImputer
imputer = SimpleImputer(missing_values=np.nan, strategy='mean')
X[:, 1:3] = imputer.fit_transform(X[:, 1:3])
print(X)
print(y)

#Encoding categorical Data
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X = LabelEncoder()
X[:, 0] = labelencoder_X.fit_transform(X[:, 0])

# Dummy Encoding of the first (categorical) column.
# FIX: OneHotEncoder(categorical_features=...) was removed in 0.22;
# ColumnTransformer selects the column subset instead.  sparse_threshold=0
# forces a dense array, matching the old ``.toarray()`` call.
from sklearn.compose import ColumnTransformer
ct = ColumnTransformer([('onehot', OneHotEncoder(), [0])],
                       remainder='passthrough', sparse_threshold=0)
X = ct.fit_transform(X)
labelencoder_y = LabelEncoder()
y = labelencoder_y.fit_transform(y)

#Splitting dataset into training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

#Feature Scaling
# Fit the scaler on the training set only, then reuse it on the test set.
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
#Data Preprocessing Template
| [
"43860070+Kushjadaun@users.noreply.github.com"
] | 43860070+Kushjadaun@users.noreply.github.com |
6fcb98bf130d6fe7794dcbb0f39cba96ea071f2b | 316eada5e13da6207801831b115cb8bc0a8ed970 | /politician/urls.py | 60abfdbfc4ac76261894f1a93d0a5ba1e3722102 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | edward-ly/WeVoteServer | d942ecba975e2b5a2082a078c9bd2b35ad58d3d3 | 24b9f0d0cd065f933707dd08391f3883bab9fb37 | refs/heads/develop | 2021-01-23T21:21:39.227475 | 2019-05-09T16:04:36 | 2019-05-09T16:04:36 | 102,893,733 | 0 | 0 | null | 2017-09-08T18:51:44 | 2017-09-08T18:44:40 | Python | UTF-8 | Python | false | false | 1,260 | py | # politician/urls.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from . import views_admin
from django.conf.urls import url
urlpatterns = [
    # List view of all politicians.
    url(r'^$', views_admin.politician_list_view, name='politician_list',),
    # Form-submission endpoint for create/update.
    url(r'^edit_process/$', views_admin.politician_edit_process_view, name='politician_edit_process'),
    # Deletion endpoint.
    url(r'^delete/', views_admin.politician_delete_process_view, name='politician_delete_process'),
    # Bulk import from the master server.
    url(r'^import/$',
        views_admin.politicians_import_from_master_server_view, name='politicians_import_from_master_server'),
    # Blank form for creating a new politician.
    url(r'^new/$', views_admin.politician_new_view, name='politician_new'),
    # Edit form for one politician, selected by numeric id.
    url(r'^(?P<politician_id>[0-9]+)/edit/$', views_admin.politician_edit_view, name='politician_edit'),
    # Photo retrieval for one politician.
    url(r'^(?P<politician_id>[0-9]+)/retrieve_photos/$',
        views_admin.politician_retrieve_photos_view, name='politician_retrieve_photos'),
    # url(r'^(?P<politician_id>[0-9]+)/tag_new/$', views.politician_tag_new_view, name='politician_tag_new'),
    # url(r'^(?P<politician_id>[0-9]+)/tag_new_process/$',
    #     views.politician_tag_new_process_view, name='politician_tag_new_process'),
    # url(r'^(?P<pk>[0-9]+)/add_tag/$', views.PoliticianAddTagView.as_view(), name='politician_add_tag'),
]
"dale.mcgrew@wevoteusa.org"
] | dale.mcgrew@wevoteusa.org |
2971a3b1ec52cbc72aa4073ad4c8172d91dccafd | 4b265adfae6d91d614a628705571805a2c3d241e | /migrations/versions/3e4b752d4b66_.py | 4be97e84029f717fc303084c944902289e0ab040 | [] | no_license | 1010784344/mybbs | 02d85a661f42b648cd0939c0550959d758f0717d | 0787c77c32f78de6e6cf16db55c3502bf43307d2 | refs/heads/master | 2022-12-22T17:45:55.908981 | 2020-03-02T06:44:10 | 2020-03-02T06:44:10 | 244,299,839 | 0 | 0 | null | 2022-09-16T18:18:56 | 2020-03-02T06:45:01 | Python | UTF-8 | Python | false | false | 1,042 | py | """empty message
Revision ID: 3e4b752d4b66
Revises: 907d0dec1971
Create Date: 2018-06-10 22:55:59.028570
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3e4b752d4b66'        # identifier of this migration
down_revision = '907d0dec1971'   # migration this one builds on
branch_labels = None
depends_on = None


def upgrade():
    """Create the ``comment`` table (auto-generated by Alembic)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('comment',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('content', sa.Text(), nullable=False),
    sa.Column('create_time', sa.DateTime(), nullable=True),
    sa.Column('post_id', sa.Integer(), nullable=True),
    sa.Column('author_id', sa.String(length=50), nullable=False),
    sa.ForeignKeyConstraint(['author_id'], ['front_user.id'], ),
    sa.ForeignKeyConstraint(['post_id'], ['post.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###


def downgrade():
    """Drop the ``comment`` table, reversing upgrade()."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('comment')
    # ### end Alembic commands ###
| [
"1010784344@qq.com"
] | 1010784344@qq.com |
b441d0ca2ecfaf7defd1eaf369f3be18a2441a4e | 2a1f4c4900693c093b2fcf4f84efa60650ef1424 | /py/cli/factory_env_unittest.py | d04bc714f33ffe86931dfcac330040122dbf74b8 | [
"BSD-3-Clause"
] | permissive | bridder/factory | b925f494303728fa95017d1ba3ff40ac5cf6a2fd | a1b0fccd68987d8cd9c89710adc3c04b868347ec | refs/heads/master | 2023-08-10T18:51:08.988858 | 2021-09-21T03:25:28 | 2021-09-21T03:25:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,655 | py | #!/usr/bin/env python3
# Copyright 2020 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import glob
import os
import unittest
from cros.factory.cli import factory_env
from cros.factory.utils import process_utils
# Repository root: three directories above this test file.
FACTORY_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(
    os.path.abspath(__file__))))
# Wrapper executable under test and the script it symlinks to.
FACTORY_ENV_TOOL = os.path.join(FACTORY_ROOT, "bin/factory_env")
FACTORY_ENV_SCRIPT = os.path.join(FACTORY_ROOT, "py/cli/factory_env.py")
# Test fixtures: a plain script and a symlinked executable.
DUMMY_SCRIPT = os.path.join(
    FACTORY_ROOT, "py/cli/testdata/scripts/dummy_script.py")
DUMMY_EXCUTABLE = os.path.join(
    FACTORY_ROOT, "py/cli/testdata/bin/dummy_script")


class FactoryEnvUnittest(unittest.TestCase):
    """Exercises the bin/factory_env wrapper end to end via subprocesses."""

    def testSymbolicLinkToFactoryEnv(self):
        # Running the symlinked executable directly should exit 0.
        self.assertEqual(0,
                         process_utils.LogAndCheckCall(DUMMY_EXCUTABLE).returncode)

    def testFactoryEnvWithSymbolicLinkToFactoryEnv(self):
        self.assertEqual(0, process_utils.LogAndCheckCall(
            [FACTORY_ENV_TOOL, DUMMY_EXCUTABLE]).returncode)

    def testMultipleFactoryEnv(self):
        # Invoking factory_env through itself should still succeed.
        self.assertEqual(0, process_utils.LogAndCheckCall(
            [FACTORY_ENV_TOOL, FACTORY_ENV_TOOL, DUMMY_EXCUTABLE]).returncode)

    def testFactoryEnvWithScript(self):
        self.assertEqual(0, process_utils.LogAndCheckCall(
            [FACTORY_ENV_TOOL, DUMMY_SCRIPT]).returncode)

    def testHelpMessage(self):
        # --help prints the canned help text and exits with status 1.
        process = process_utils.Spawn(
            [FACTORY_ENV_TOOL, '--help'], read_stdout=True)
        self.assertEqual(factory_env.HELP_MSG, process.stdout_data)
        self.assertEqual(1, process.returncode)

    def testScriptNotFound(self):
        # A bad script path also produces the help text and status 1.
        process = process_utils.Spawn(
            [FACTORY_ENV_TOOL, 'script/not/found'], read_stdout=True)
        self.assertEqual(factory_env.HELP_MSG, process.stdout_data)
        self.assertEqual(1, process.returncode)

    def testPythonInterpreter(self):
        # The wrapped interpreter must expose factory/py_pkg on sys.path.
        output = process_utils.CheckOutput(
            [FACTORY_ENV_TOOL, 'python', '-c', 'import sys; print(sys.path)'])
        self.assertIn('factory/py_pkg', output)


class SymlinkUnittest(unittest.TestCase):
    """Checks that every .py symlink in bin/ goes through factory_env."""

    def testLegalityForSymlinkInBin(self):
        # NOTE(review): "bin/**" without recursive=True matches like "bin/*";
        # confirm whether recursion into subdirectories was intended.
        for path in glob.glob(os.path.join(FACTORY_ROOT, "bin/**")):
            if not os.path.islink(path):
                continue
            real_path = os.path.realpath(path)
            if not real_path.endswith('.py'):
                continue
            # Make sure bin/tool_name links to FACTORY_ENV_SCRIPT
            self.assertEqual(real_path, FACTORY_ENV_SCRIPT)
            # Make sure py/cli/tool_name.py exist
            self.assertTrue(os.path.exists(factory_env.GetRealScriptPath(path)))
if __name__ == '__main__':
unittest.main()
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
bfe2741feb16a2c462c0fd4040ed8d43e1017389 | c5c56d7c14b4518e53bcde2527b9cc6e53a7e1b9 | /doctests/yatzy.py | 6dc991e1845f9366663dfba8bb5396adf434c97b | [] | no_license | lancelote/pluralsight-unit-testing-python | 0402a39e3800eec49f2be529e684d028689d3b47 | fd5ce8264bc95ed66109c4fa575a177248c3d49a | refs/heads/master | 2021-01-10T08:06:39.605195 | 2016-03-23T08:15:25 | 2016-03-23T08:15:25 | 51,952,064 | 4 | 6 | null | null | null | null | UTF-8 | Python | false | false | 3,470 | py | # coding=utf-8
""""
Yatzy Game
"""
from operator import itemgetter
def dice_counts(dice):
    """Tally how many dice show each face value.

    Args:
        dice (lst): A sorted list of 5 integers indicating the dice rolled

    Returns:
        dict: How many of each value (1-6) are in the dice

    Examples:
        >>> sorted(dice_counts([1, 2, 2, 3, 3]).items())
        [(1, 1), (2, 2), (3, 2), (4, 0), (5, 0), (6, 0)]

        >>> dice_counts('12345')
        Traceback (most recent call last):
        ...
        TypeError: Can't convert 'int' object to str implicitly
    """
    tally = {}
    for face in range(1, 7):
        tally[face] = dice.count(face)
    return tally
def small_straight(dice):
    """Score the given roll in the 'Small Straight' Yatzy category.

    Any collection holding exactly the faces 1-5 scores the dice total
    (always 15); everything else scores zero.

    Examples:
        >>> small_straight([1, 2, 3, 4, 5])
        15
        >>> small_straight([1, 2, 3, 4, 4])
        0

    This function works with lists or sets or other collection types:

        >>> small_straight({1, 2, 3, 4, 5})
        15
        >>> small_straight([5, 4, 3, 2, 1])
        15
    """
    if sorted(dice) == [1, 2, 3, 4, 5]:
        return sum(dice)
    return 0
def yatzy(dice):
    """Score the given roll in the 'Yatzy' category.

    Five of a kind scores 50; anything else scores zero.

    Examples:
        >>> yatzy([1, 1, 1, 1, 1])
        50
        >>> yatzy([4, 4, 4, 4, 4])
        50
        >>> yatzy([4, 4, 4, 4, 1])
        0
    """
    # Equivalent to checking dice_counts(dice) for a count of 5,
    # with the helper inlined.
    for face in range(1, 7):
        if dice.count(face) == 5:
            return 50
    return 0
def full_house(dice):
    """Score the given roll in the 'Full House' category.

    A pair plus a three-of-a-kind (of different faces) scores the dice
    total; anything else scores zero.

    Examples:
        >>> full_house([1, 1, 2, 2, 2])
        8
        >>> full_house([6, 6, 6, 2, 2])
        22
        >>> full_house([1, 2, 3, 4, 5])
        0
        >>> full_house([1, 2, 2, 1, 3])
        0
    """
    # Per-face tallies for faces 1-6 (dice_counts inlined).
    tallies = [dice.count(face) for face in range(1, 7)]
    if 2 in tallies and 3 in tallies:
        return sum(dice)
    return 0
def ones(dice):
    """Score the given roll in the 'Ones' category.

    Args:
        dice (list): A sorted list of 5 integers indicating the dice rolled

    Returns:
        int: Score (one point per die showing a 1)
    """
    # Same value as dice_counts(dice)[1], spelled directly.
    return dice.count(1)
def twos(dice):
    """Score the given roll in the 'Twos' category.

    Args:
        dice (list): A sorted list of 5 integers indicating the dice rolled

    Returns:
        int: Score (two points per die showing a 2)
    """
    # Same value as dice_counts(dice)[2] * 2, spelled directly.
    return 2 * dice.count(2)
ALL_CATEGORIES = [full_house, yatzy, small_straight, ones, twos]


def scores_in_categories(dice, categories=ALL_CATEGORIES):
    """Score the dice in each category and return those with a non-zero score.

    Args:
        dice (list): A sorted list of 5 integers indicating the dice rolled
        categories (list): A list of category functions

    Returns:
        list: ``(score, category)`` pairs, highest score first

    Examples:
        >>> scores = scores_in_categories([1, 1, 2, 2, 2])
        >>> [(score, category.__name__) for (score, category) in scores]
        [(8, 'full_house'), (6, 'twos'), (2, 'ones')]
    """
    # FIX: evaluate each category exactly once.  The original called
    # ``category(dice)`` twice per category (once in the filter, once for
    # the stored score), doubling the work for identical results.
    scores = []
    for category in categories:
        score = category(dice)
        if score > 0:
            scores.append((score, category))
    return sorted(scores, reverse=True, key=itemgetter(0))
| [
"karateev.pavel@ya.ru"
] | karateev.pavel@ya.ru |
91b0db98ecd89c19b85d3d89b875b8fb59e63615 | d110546d747d7e3865ce5742d5fca09f404623c0 | /tests/pytests/unit/modules/test_devmap.py | f7fc9f09ea1f41f68a666b5c0b0b0a4431769644 | [
"Apache-2.0",
"MIT",
"BSD-2-Clause"
] | permissive | saltstack/salt | 354fc86a7be1f69514b3dd3b2edb9e6f66844c1d | 1ef90cbdc7203f97775edb7666db86a41eb9fc15 | refs/heads/master | 2023-07-19T20:56:20.210556 | 2023-06-29T23:12:28 | 2023-07-19T11:47:47 | 1,390,248 | 11,026 | 6,296 | Apache-2.0 | 2023-09-14T20:45:37 | 2011-02-20T20:16:56 | Python | UTF-8 | Python | false | false | 991 | py | """
:codeauthor: Rupesh Tare <rupesht@saltstack.com>
"""
import os.path
import pytest
import salt.modules.devmap as devmap
from tests.support.mock import MagicMock, patch
@pytest.fixture
def configure_loader_modules():
    # No salt dunders need pre-seeding; tests patch __salt__ themselves.
    return {devmap: {}}


def test_multipath_list():
    """
    Test for Device-Mapper Multipath list
    """
    mock = MagicMock(return_value="A")
    with patch.dict(devmap.__salt__, {"cmd.run": mock}):
        # The "A" returned by cmd.run comes back wrapped in a list.
        assert devmap.multipath_list() == ["A"]


def test_multipath_flush():
    """
    Test for Device-Mapper Multipath flush
    """
    # Nonexistent device: the function returns an error string instead.
    mock = MagicMock(return_value=False)
    with patch.object(os.path, "exists", mock):
        assert devmap.multipath_flush("device") == "device does not exist"

    # Existing device: cmd.run output is returned wrapped in a list.
    mock = MagicMock(return_value=True)
    with patch.object(os.path, "exists", mock):
        mock = MagicMock(return_value="A")
        with patch.dict(devmap.__salt__, {"cmd.run": mock}):
            assert devmap.multipath_flush("device") == ["A"]
| [
"mwilhite@vmware.com"
] | mwilhite@vmware.com |
db3caed3bea7b8e75f04ec4721bc0ebd0e3624b1 | ede96590eee4880ff83d1f1d8db5229e92c6e919 | /leasing/metadata.py | 0639d907861c764c94129916cc3f6a2315f07bc7 | [
"MIT"
] | permissive | igordavydsson/mvj | a4c5b39e7be9f95e15a2e906ad61b98611998063 | b467c6229f9d458d56b66f628b0841adb67a2970 | refs/heads/master | 2020-04-22T20:42:06.650182 | 2019-02-12T13:50:57 | 2019-02-12T13:50:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,833 | py | from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from enumfields.drf import EnumField
from rest_framework.fields import ChoiceField, DecimalField
from rest_framework.metadata import SimpleMetadata
from rest_framework.relations import PrimaryKeyRelatedField
from field_permissions.metadata import FieldPermissionsMetadataMixin
from leasing.models import Contact, Decision, Invoice, Lease
from leasing.models.invoice import InvoiceSet
from leasing.permissions import PerMethodPermission
from users.models import User
# Template of HTTP methods mapped to "allowed" flags; copied per request and
# filled in from the requesting user's model permissions.
ALL_METHODS = {
    'GET': False,
    'OPTIONS': False,
    'HEAD': False,
    'POST': False,
    'PUT': False,
    'PATCH': False,
    'DELETE': False,
}


class FieldsMetadata(FieldPermissionsMetadataMixin, SimpleMetadata):
    """Returns metadata for all the fields and the possible choices in the
    serializer even when the fields are read only.

    Additionally adds decimal_places and max_digits info for DecimalFields."""

    def determine_metadata(self, request, view, serializer=None):
        """Build the OPTIONS metadata dict: field info plus a per-HTTP-method
        permission map for the requesting user."""
        metadata = super().determine_metadata(request, view)

        if not serializer and hasattr(view, 'get_serializer'):
            serializer = view.get_serializer()

        if serializer:
            metadata["fields"] = self.get_serializer_info(serializer)

            # Determine allowed methods for model views
            if hasattr(serializer, 'Meta') and serializer.Meta.model:
                method_permissions = ALL_METHODS.copy()
                for permission in view.get_permissions():
                    # Only permission classes exposing model-level
                    # requirements participate.
                    if not hasattr(permission, 'get_required_permissions'):
                        continue

                    for method in method_permissions.keys():
                        perms = permission.get_required_permissions(method, serializer.Meta.model)
                        method_permissions[method] = request.user.has_perms(perms)

                metadata['methods'] = method_permissions

        # Determine methods the user has permission to for custom views
        # and viewsets that are using PerMethodPermission.
        if PerMethodPermission in view.permission_classes:
            permission = PerMethodPermission()
            method_permissions = {}
            for method in view.allowed_methods:
                required_perms = permission.get_required_permissions(method, view)
                method_permissions[method.upper()] = request.user.has_perms(required_perms)
            metadata['methods'] = method_permissions

        return metadata

    def get_field_info(self, field):
        """Extend DRF's per-field info with decimal precision, translated
        language names, and choice lists for relations/enums."""
        field_info = super().get_field_info(field)

        if isinstance(field, DecimalField):
            field_info['decimal_places'] = field.decimal_places
            field_info['max_digits'] = field.max_digits

        # Kludge for translating language names
        if isinstance(field, ChoiceField) and field.field_name == 'language':
            field_info['choices'] = [{
                'value': choice_value,
                'display_name': _(choice_name).capitalize(),
            } for choice_value, choice_name in field.choices.items()]
            field_info['choices'].sort(key=lambda x: x['display_name'])

        if isinstance(field, PrimaryKeyRelatedField) or isinstance(field, EnumField):
            # TODO: Make configurable
            # NOTE(review): presumably these models are skipped because their
            # querysets are too large to enumerate as choices — verify.
            if hasattr(field, 'queryset') and field.queryset.model in (User, Lease, Contact, Decision, Invoice,
                                                                       InvoiceSet):
                return field_info

            field_info['choices'] = [{
                'value': choice_value,
                'display_name': force_text(choice_name, strings_only=True)
            } for choice_value, choice_name in field.choices.items()]

        return field_info
| [
"mikko.keskinen@anders.fi"
] | mikko.keskinen@anders.fi |
66228a98b7aef124fd015c3823c8dd4f0b4d939d | a34a6861adabdffba0dec1bf9ba2d6b48c4564cb | /model.py | db2d48ca5676c1d6378ff82589047949cbfd1179 | [] | no_license | AotY/gumbel_softx_vae | d4095212117cdbdd71434fd47f51ae0aef42869f | f345efe797fb9adc00f5d4e288da80102f23850e | refs/heads/master | 2020-04-02T03:31:28.336284 | 2018-10-21T05:11:36 | 2018-10-21T05:11:36 | 153,970,643 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,875 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright © 2018 LeonTao
#
# Distributed under terms of the MIT license.
"""
"""
import torch
import torch.nn as nn
import torch.funtional as F
from gumbel_softmax import GumbelSoftmax
class GumbelVAE(nn.Module):
    """Categorical VAE trained with the Gumbel-Softmax relaxation.

    The encoder compresses a flat input vector into
    ``latent_size * category_size`` logits; each group of ``category_size``
    logits is relaxed/sampled by ``GumbelSoftmax`` and the flattened sample
    is decoded back to the input space.

    This is a repaired version of the original block, which did not run:
    the ``__init__`` signature placed a default before required parameters
    (SyntaxError), and the body contained ``sefl``/``slef``/``input_sizse``
    typos, an invalid ``nn.Sigmoid(dim=2)`` call and an undefined
    ``tmp_softmax`` name.
    """

    def __init__(self, input_size, latent_size, category_size, device):
        super(GumbelVAE, self).__init__()

        self.input_size = input_size
        self.latent_size = latent_size
        self.category_size = category_size

        # Encoder: input -> 512 -> 256 -> latent logits.
        self.fc1 = nn.Linear(input_size, 512)
        self.fc2 = nn.Linear(512, 256)
        self.fc3 = nn.Linear(256, latent_size * category_size)

        # Decoder: latent sample -> 256 -> 512 -> reconstruction.
        self.fc4 = nn.Linear(latent_size * category_size, 256)
        self.fc5 = nn.Linear(256, 512)
        self.fc6 = nn.Linear(512, input_size)

        self.relu = nn.ReLU()
        # nn.Sigmoid is element-wise and takes no ``dim`` argument.
        self.sigmoid = nn.Sigmoid()
        self.gumbel_softmax = GumbelSoftmax(dim=2, device=device)

    def encode(self, input):
        """Return unnormalized logits of shape (batch, latent_size * category_size)."""
        h1 = self.relu(self.fc1(input))
        h2 = self.relu(self.fc2(h1))
        return self.relu(self.fc3(h2))

    def decode(self, encode_output):
        """Map a flat latent sample back to per-element probabilities in [0, 1]."""
        h4 = self.relu(self.fc4(encode_output))
        h5 = self.relu(self.fc5(h4))
        return self.sigmoid(self.fc6(h5))

    def forward(self, input, temperature):
        """Run encode -> Gumbel-Softmax sampling -> decode.

        Returns:
            tuple: (reconstruction, softmax of the encoder logits); the
            second element feeds the KL term of the VAE loss.
        """
        logits = self.encode(input)
        # Expose the category axis so GumbelSoftmax (dim=2) can act on it.
        sample = logits.view(logits.size(0), self.latent_size, self.category_size)
        sample = self.gumbel_softmax(sample, temperature)
        sample = sample.view(-1, self.latent_size * self.category_size)
        decode_output = self.decode(sample)
        # ``torch.softmax`` is used instead of ``F.softmax`` because the
        # module-level ``import torch.funtional as F`` is misspelled and
        # would fail at import time.
        return decode_output, torch.softmax(logits, dim=-1)
| [
"694731929@qq.com"
] | 694731929@qq.com |
c4bc04bf5d469f3e5315f2941b33cfd2a704a7ed | 35ab93904c03c1494b470fe60ff17a6e3b8858e4 | /tests/mocks/committees.py | 56d14dbb8a5c2c603ea525703c8a13ac295bf0d4 | [
"MIT"
] | permissive | alefbt/knesset-data-pipelines | cb6220fc96c95f50925e4b99d8682760729cf067 | ed743fb4c84ce9e9ae0b935d686d05673d868416 | refs/heads/master | 2021-06-22T20:06:17.254073 | 2017-08-13T17:08:40 | 2017-08-13T17:08:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,195 | py | from datapackage_pipelines_knesset.committees.processors.download_committee_meeting_protocols import DownloadCommitteeMeetingProtocolsProcessor
from datapackage_pipelines_knesset.committees.processors.parse_committee_meeting_protocols import ParseCommitteeMeetingProtocolsProcessor
from datapackage_pipelines_knesset.committees.processors.committee_meeting_protocols_update_db import CommitteeMeetingProtocolsUpdateDbProcessor
import os
from datapackage_pipelines_knesset.common.db import get_session
class MockDownloadCommitteeMeetingProtocols(DownloadCommitteeMeetingProtocolsProcessor):
    """Download-processor variant for tests: uses an in-memory DB and caches
    HTTP responses as fixture files next to this module."""

    def _get_session(self):
        # Tests run against a throwaway in-memory sqlite database.
        return get_session(connection_string="sqlite://")

    def _reuqests_get(self, url):
        # NOTE(review): the misspelled name matches the hook this override
        # replaces in the parent processor — do not rename it independently.
        # Map each known protocol URL to a local fixture file name.
        if url == "http://fs.knesset.gov.il//20/Committees/20_ptv_389210.doc":
            filename = "20_ptv_389210.doc"
        elif url == "http://knesset.gov.il/protocols/data/rtf/knesset/2007-12-27.rtf":
            filename = "2007-12-27.rtf"
        elif url == "http://fs.knesset.gov.il//20/Committees/20_ptv_387483.doc":
            filename = "20_ptv_387483.doc"
        else:
            raise Exception("unknown url: {}".format(url))
        filename = os.path.join(os.path.dirname(__file__), filename)
        if not os.path.exists(filename):
            # Cache miss: fetch for real via the parent implementation and
            # persist the body (and the status code, when it is not 200).
            res = super(MockDownloadCommitteeMeetingProtocols, self)._reuqests_get(url)
            if res.status_code != 200:
                with open(filename+".status_code", 'w') as f:
                    f.write(str(res.status_code))
            with open(filename, 'wb') as f:
                f.write(res.content)
        # Serve the cached body with its recorded (or implied 200) status.
        with open(filename, "rb") as f:
            content = f.read()
        if os.path.exists(filename+".status_code"):
            with open(filename+".status_code") as f:
                status_code = int(f.read())
        else:
            status_code = 200
        # Minimal stand-in exposing the two response attributes callers use.
        return type("MockResponse", (object,), {"status_code": status_code,
                                                "content": content})()


class MockParseCommitteeMeetingProtocols(ParseCommitteeMeetingProtocolsProcessor):
    # Parsing needs no test-specific overrides.
    pass


class MockCommitteeMeetingProtocolsUpdateDb(CommitteeMeetingProtocolsUpdateDbProcessor):
    # The DB-update step needs no test-specific overrides.
    pass
| [
"ori@uumpa.com"
] | ori@uumpa.com |
5b8612f3e472db95cd9fdaa093ba14d6411ec101 | dd89a85bbefa12a6c8e8b66ffc84c08767f0e841 | /backend/task_profile/migrations/0001_initial.py | c7740497962e5be06a671658b9c15cad0b155cb6 | [] | no_license | crowdbotics-apps/sample-27023 | ac5f358cba9432b02080d3f3177efd23d35a08ed | e2e1f0d918e6cc47a87bfd7f318f1b6797f19d2d | refs/heads/master | 2023-04-30T14:24:59.449141 | 2021-05-21T03:01:21 | 2021-05-21T03:01:21 | 369,397,318 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,239 | py | # Generated by Django 2.2.20 on 2021-05-21 03:01
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial auto-generated migration for the ``task_profile`` app:
    tasker/customer profiles, notifications, and invite codes."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='TaskerProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('mobile_number', models.CharField(max_length=20)),
                ('photo', models.URLField()),
                ('timestamp_created', models.DateTimeField(auto_now_add=True)),
                ('last_updated', models.DateTimeField(auto_now=True)),
                ('last_login', models.DateTimeField(blank=True, null=True)),
                ('description', models.TextField(blank=True, null=True)),
                ('city', models.CharField(blank=True, max_length=50, null=True)),
                ('vehicle', models.CharField(blank=True, max_length=50, null=True)),
                ('closing_message', models.TextField(blank=True, null=True)),
                ('work_area_radius', models.FloatField(blank=True, null=True)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='taskerprofile_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Notification',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('type', models.CharField(max_length=20)),
                ('message', models.TextField()),
                ('timestamp_created', models.DateTimeField(auto_now_add=True)),
                ('user', models.ManyToManyField(related_name='notification_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='InviteCode',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.CharField(max_length=20)),
                ('timestamp_created', models.DateTimeField(auto_now_add=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='invitecode_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='CustomerProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('mobile_number', models.CharField(max_length=20)),
                ('photo', models.URLField()),
                ('timestamp_created', models.DateTimeField(auto_now_add=True)),
                ('last_updated', models.DateTimeField(auto_now=True)),
                ('last_login', models.DateTimeField(blank=True, null=True)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='customerprofile_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
f8b2e6eb000e9bb4ab907381ef8afbef0d9ae96e | 453df013de5dc74291db65436011b661d969e4b6 | /soccer/gameplay2/plays/restarts/kick_penalty.py | 3476f67818ae84d74caf1477ebca2fda655fab5d | [] | no_license | david2194/robocup-software | 3f04eb7de4b84cafdab1a956df7cc48c3d3d4604 | 6f98c38ddb129ca49be357fc230990c16eadf9d4 | refs/heads/master | 2021-01-17T21:39:47.832797 | 2014-07-15T01:31:51 | 2014-07-15T01:31:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 820 | py | import play
import behavior
import robocup
import main
import tactics.line_up
import tactics.penalty
# one robot kicks the ball, the others just line up and wait
class KickPenalty(play.Play):
def __init__(self):
super().__init__(continuous=True)
self.add_transition(behavior.Behavior.State.start,
behavior.Behavior.State.running,
lambda: True,
'immediately')
kicker = tactics.penalty.Penalty()
self.add_subbehavior(kicker, 'kicker', required=True, priority=10)
line = robocup.Segment(robocup.Point(1.5, 1), robocup.Point(1.5, 2.5))
line_up = tactics.line_up.LineUp(line)
@classmethod
def score(cls):
gs = main.game_state()
return 0.0 if gs.is_setup_state() and gs.is_our_penalty() else float("inf")
| [
"justbuchanan@gmail.com"
] | justbuchanan@gmail.com |
0d6c12a20b87eb1a3983e038e756badb1c55e1c1 | 55c24645dd63a1c41037dcfb9fb45bc7bcdea4be | /venv/lib/python3.7/site-packages/sqlalchemy/__init__.py | a8209abb0731906a86b9be969f0404a04d25f2f6 | [] | no_license | abdullah-nawaz/flask-boilerplate | 7c42801a21ee3e6a647cc8a7d92e0285f8e86cad | 01bc7fe1140e8ec613de4a38546a07ddfbdbd254 | refs/heads/master | 2022-12-02T05:06:08.297759 | 2020-06-24T21:36:32 | 2020-06-24T21:36:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,659 | py | # sqlalchemy/__init__.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import util as _util # noqa
from .inspection import inspect # noqa
from .schema import BLANK_SCHEMA # noqa
from .schema import CheckConstraint # noqa
from .schema import Column # noqa
from .schema import ColumnDefault # noqa
from .schema import Computed # noqa
from .schema import Constraint # noqa
from .schema import DDL # noqa
from .schema import DefaultClause # noqa
from .schema import FetchedValue # noqa
from .schema import ForeignKey # noqa
from .schema import ForeignKeyConstraint # noqa
from .schema import Index # noqa
from .schema import MetaData # noqa
from .schema import PassiveDefault # noqa
from .schema import PrimaryKeyConstraint # noqa
from .schema import Sequence # noqa
from .schema import Table # noqa
from .schema import ThreadLocalMetaData # noqa
from .schema import UniqueConstraint # noqa
from .sql import alias # noqa
from .sql import all_ # noqa
from .sql import and_ # noqa
from .sql import any_ # noqa
from .sql import asc # noqa
from .sql import between # noqa
from .sql import bindparam # noqa
from .sql import case # noqa
from .sql import cast # noqa
from .sql import collate # noqa
from .sql import column # noqa
from .sql import delete # noqa
from .sql import desc # noqa
from .sql import distinct # noqa
from .sql import except_ # noqa
from .sql import except_all # noqa
from .sql import exists # noqa
from .sql import extract # noqa
from .sql import false # noqa
from .sql import func # noqa
from .sql import funcfilter # noqa
from .sql import insert # noqa
from .sql import intersect # noqa
from .sql import intersect_all # noqa
from .sql import join # noqa
from .sql import lateral # noqa
from .sql import literal # noqa
from .sql import literal_column # noqa
from .sql import modifier # noqa
from .sql import not_ # noqa
from .sql import null # noqa
from .sql import nullsfirst # noqa
from .sql import nullslast # noqa
from .sql import or_ # noqa
from .sql import outerjoin # noqa
from .sql import outparam # noqa
from .sql import over # noqa
from .sql import select # noqa
from .sql import subquery # noqa
from .sql import table # noqa
from .sql import tablesample # noqa
from .sql import text # noqa
from .sql import true # noqa
from .sql import tuple_ # noqa
from .sql import type_coerce # noqa
from .sql import union # noqa
from .sql import union_all # noqa
from .sql import update # noqa
from .sql import within_group # noqa
from .types import ARRAY # noqa
from .types import BIGINT # noqa
from .types import BigInteger # noqa
from .types import BINARY # noqa
from .types import Binary # noqa
from .types import BLOB # noqa
from .types import BOOLEAN # noqa
from .types import Boolean # noqa
from .types import CHAR # noqa
from .types import CLOB # noqa
from .types import DATE # noqa
from .types import Date # noqa
from .types import DATETIME # noqa
from .types import DateTime # noqa
from .types import DECIMAL # noqa
from .types import Enum # noqa
from .types import FLOAT # noqa
from .types import Float # noqa
from .types import INT # noqa
from .types import INTEGER # noqa
from .types import Integer # noqa
from .types import Interval # noqa
from .types import JSON # noqa
from .types import LargeBinary # noqa
from .types import NCHAR # noqa
from .types import NUMERIC # noqa
from .types import Numeric # noqa
from .types import NVARCHAR # noqa
from .types import PickleType # noqa
from .types import REAL # noqa
from .types import SMALLINT # noqa
from .types import SmallInteger # noqa
from .types import String # noqa
from .types import TEXT # noqa
from .types import Text # noqa
from .types import TIME # noqa
from .types import Time # noqa
from .types import TIMESTAMP # noqa
from .types import TypeDecorator # noqa
from .types import Unicode # noqa
from .types import UnicodeText # noqa
from .types import VARBINARY # noqa
from .types import VARCHAR # noqa
from .engine import create_engine # noqa nosort
from .engine import engine_from_config # noqa nosort
__version__ = "1.3.17"
def __go(lcls):
    """Finish package setup: compute ``__all__`` and resolve lazy dependencies.

    *lcls* is the module namespace (``locals()`` at module level).
    """
    global __all__

    from . import events  # noqa  (unused here; presumably imported for side effects)
    from . import util as _sa_util

    import inspect as _inspect

    # Export every public (non-underscore), non-module name bound above.
    __all__ = sorted(
        name
        for name, obj in lcls.items()
        if not (name.startswith("_") or _inspect.ismodule(obj))
    )

    _sa_util.dependencies.resolve_all("sqlalchemy")


__go(locals())
| [
"muhammadabdullah@wanclouds.net"
] | muhammadabdullah@wanclouds.net |
6822f81d9f94b272ee76b01d65f926ac917a2f80 | dfaf6f7ac83185c361c81e2e1efc09081bd9c891 | /k8sdeployment/k8sstat/python/kubernetes/test/test_runtime_raw_extension.py | ee67e7a373c69490627d5edc9482fa2e486fd0ae | [
"Apache-2.0",
"MIT"
] | permissive | JeffYFHuang/gpuaccounting | d754efac2dffe108b591ea8722c831d979b68cda | 2c63a63c571240561725847daf1a7f23f67e2088 | refs/heads/master | 2022-08-09T03:10:28.185083 | 2022-07-20T00:50:06 | 2022-07-20T00:50:06 | 245,053,008 | 0 | 0 | MIT | 2021-03-25T23:44:50 | 2020-03-05T02:44:15 | JavaScript | UTF-8 | Python | false | false | 968 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.15.6
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes.client
from kubernetes.client.models.runtime_raw_extension import RuntimeRawExtension # noqa: E501
from kubernetes.client.rest import ApiException
class TestRuntimeRawExtension(unittest.TestCase):
    """RuntimeRawExtension unit test stubs"""

    def setUp(self):
        # No shared fixtures required for these generated stubs.
        pass

    def tearDown(self):
        pass

    def testRuntimeRawExtension(self):
        """Test RuntimeRawExtension"""
        # FIXME: construct object with mandatory attributes with example values
        # model = kubernetes.client.models.runtime_raw_extension.RuntimeRawExtension()  # noqa: E501
        pass
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| [
"JeffYFHuang@github.com"
] | JeffYFHuang@github.com |
50c91ad80d12b49cb8dd5fa2e3e23d87d64c3ce0 | 46667df8344db58698838d677bdae377b3c3c53c | /Data Manipulation with Pandas/Part 2/25.upsampling-data.py | 18d91a2eac217beb607b6e8648ee86d48cbb6b62 | [] | no_license | bennysetiawan/DQLab-Career-2021 | 278577cdddb3852c57f799cd1207b4ff45962960 | 0822d15e3b24cf0146c23456d4b65b0fb00a53fc | refs/heads/master | 2023-06-06T13:24:21.289929 | 2021-06-23T17:09:14 | 2021-06-23T17:09:14 | 379,657,598 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 561 | py | import pandas as pd
# Load dataset https://dqlab-dataset.s3-ap-southeast-1.amazonaws.com/LO4/global_air_quality_4000rows.csv
gaq = pd.read_csv('https://dqlab-dataset.s3-ap-southeast-1.amazonaws.com/LO4/global_air_quality_4000rows.csv')
# Parse the timestamp column and promote it to a DatetimeIndex so that
# resample() can operate on the time axis.
gaq['timestamp'] = pd.to_datetime(gaq['timestamp'])
gaq = gaq.set_index('timestamp')
print('Dataset sebelum di-upsampling (5 teratas):\n', gaq.head())
# Upsample from daily to hourly frequency, aggregating with the mean;
# the newly created intermediate hours hold NaN until filled downstream.
gaq_hourly = gaq.resample('H').mean()
print('Upsampling daily to hourly - mean (5 teratas):\n', gaq_hourly.head())
"setiawanb25@gmail.com"
] | setiawanb25@gmail.com |
f04775a90eb47f46df8bc83f530d0483eb919a60 | b08d42933ac06045905d7c005ca9c114ed3aecc0 | /src/coefSubset/evaluate/ranks/tenth/rank_1ay7_O.py | 0e06f6a99063b4335484694384ed33130ec83f0a | [] | no_license | TanemuraKiyoto/PPI-native-detection-via-LR | d148d53f5eb60a4dda5318b371a3048e3f662725 | 897e7188b0da94e87126a4acc0c9a6ff44a64574 | refs/heads/master | 2022-12-05T11:59:01.014309 | 2020-08-10T00:41:17 | 2020-08-10T00:41:17 | 225,272,083 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,204 | py | # 9 July 2019
# Kiyoto Aramis Tanemura
# Several metrics are used to assess the performance of the trained RF model, notably native ranking. This script returns a ranking of the native protein-protein complex among a decoy set. For convenience, I will define as a function and will call in a general performance assessment script.
# Modified 11 July 2019 by Kiyoto Aramis Tanemura. To parallelize the process, I will replace the for loop for the testFileList to a multiprocessing pool.
# Modified 9 September 2019 by Kiyoto Aramis Tanemura. I will use the function to perform the calculation on one CSV file only. Thus instead of a function to import in other scripts, they will be individual jobs parallelized as individual jobs in the queue.
import os
import pandas as pd
import numpy as np
import pickle
# NOTE(review): hard-coded cluster paths below; this script only runs on the
# original HPC filesystem layout.
os.chdir('/mnt/scratch/tanemur1/')
# Read the model and trainFile
testFile = '1ay7.csv'
identifier = 'O'
# Features whose median coefficient magnitude falls below this value are dropped.
thresholdCoef = 0.1
testFilePath = '/mnt/scratch/tanemur1/CASF-PPI/nonb_descriptors/complete/'
modelPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/tenth/'
outputPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/evaluate/tenth/ranks/'
pdbID = testFile[:4]
# Load the pickled classifier trained for this identifier.
with open(modelPath + 'model' + identifier + '.pkl', 'rb') as f:
    clf = pickle.load(f)
result = pd.DataFrame()
scoreList = []
# Load the descriptor table for this complex; rows become features after the
# transpose below (columns = decoy/native pair names).
df1 = pd.read_csv(testFilePath + testFile)
dropList = ['Unnamed: 0', 'Unnamed: 0.1', 'ref']
df1 = df1.drop(dropList, axis = 1)
df1 = df1.set_index('Pair_name')
df1 = pd.DataFrame(df1.values.T, columns = df1.index, index = df1.columns)
df1.fillna(0.0, inplace = True)
# Sort feature columns so their order matches the order used at training time.
df1 = df1.reindex(sorted(df1.columns), axis = 1)
# Drop features with coefficients below threshold
coefs = pd.read_csv('/mnt/home/tanemur1/6May2019/2019-11-11/results/medianCoefs.csv', index_col = 0, header = None, names = ['coefficients'])
coefs = coefs[np.abs(coefs['coefficients']) < thresholdCoef]
dropList = list(coefs.index)
del coefs
df1.drop(dropList, axis = 1, inplace = True)
# Load the StandardScaler fitted on the training data.
with open(modelPath + 'standardScaler' + identifier + '.pkl', 'rb') as g:
    scaler = pickle.load(g)
# Score each structure by pairwise comparison against every other structure.
for i in range(len(df1)):
    # subtract from one row each row of the dataframe, then remove the trivial row[[i]] - row[[i]]. Also some input files have 'class' column. This is erroneous and is removed.
    df2 = pd.DataFrame(df1.iloc[[i]].values - df1.values, index = df1.index, columns = df1.columns)
    df2 = df2.drop(df1.iloc[[i]].index[0], axis = 0)
    # Standardize the input DF using the standard scaler fitted on training data.
    df2 = scaler.transform(df2)
    # Predict class of each comparison descriptor and sum the classes to obtain score. Higher score corresponds to more native-like complex
    predictions = clf.predict(df2)
    score = sum(predictions)
    scoreList.append(score)
# Make a new DataFrame to store the score and corresponding descriptorID. Add rank as column. Note: lower rank corresponds to more native-like complex
result = pd.DataFrame(data = {'score': scoreList}, index = df1.index.tolist()).sort_values(by = 'score', ascending = False)
result['rank'] = range(1, len(result) + 1)
with open(outputPath + pdbID + identifier + '.csv', 'w') as h:
    result.to_csv(h)
| [
"tanemur1@msu.edu"
] | tanemur1@msu.edu |
556405e629f0f2151963bc39b08f1197eac1b386 | 78b160d8131f3c4b7aef0d051b040825a9c50e0d | /algoexpert/easy/palindromeCheck.py | 67e4ca78dd3bd4d9c8bce9e602b51280a4a5ece4 | [
"MIT"
] | permissive | ardakkk/Algorithms-and-Data-Structures | 744f8c9ffb233b95040e5bdcbddb9f5d2ff7a5ba | c428bb0bd7eeb6c34448630f88f13e1329b54636 | refs/heads/master | 2021-07-08T22:40:40.361282 | 2020-07-20T10:39:58 | 2020-07-20T10:39:58 | 156,005,721 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,134 | py | # Time: O(n^2) | Space: O(n)
# def isPalindrome(string):
# reversedString = ""
#
# for i in reversed(range(len(string))):
# reversedString += string[i]
# return string == reversedString
# Time: O(n) | Space: O(n)
# def isPlaindrome(string):
# reversedChars = []
#
# for i in reversed(range(len(string))):
# reversedChars.append(string[i])
# return string == "".join(reversedChars)
# Time: O(n) | Space: O(n)
# def isPlaindrome(string, i = 0):
# j = len(string) - 1 - i
# return True if i >= j else string[i] == string[j] and isPlaindrome(string, i + 1)
# Time: O(n) | Space: O(n)
# def isPalindrome(string, i = 0):
# j = len(string) - 1 - i
#
# if i >= j:
# return True
# if string[i] != string[j]:
# return False
#
# return isPalindrome(string, i + 1)
# Time: O(n) | Space: O(1)
def isPalindrome(string):
    """Return True if *string* reads the same forwards and backwards.

    Runs in O(n) time and O(1) extra space by comparing mirrored
    character pairs from both ends toward the middle.
    """
    length = len(string)
    for offset in range(length // 2):
        # Compare the offset-th character from the front with its mirror
        # from the back; any mismatch means the string is not a palindrome.
        if string[offset] != string[length - 1 - offset]:
            return False
    return True
print(isPalindrome('abcdcba'))
| [
"ardakfuse@gmail.com"
] | ardakfuse@gmail.com |
e73bc712fb8c9aaa9b6e279837ea9cba1a4624f9 | 09dd58f46b1e914278067a69142230c7af0165c2 | /blackmamba/lib/flake8/options/aggregator.py | 5b8ab9c33b475d3ad576e839636fd2baf3f73f86 | [
"MIT"
] | permissive | zrzka/blackmamba | 4e70262fbe3702553bf5d285a81b33eb6b3025ea | b298bc5d59e5aea9d494282910faf522c08ebba9 | refs/heads/master | 2021-01-01T18:43:19.490953 | 2020-01-20T08:26:33 | 2020-01-20T08:26:33 | 98,410,391 | 72 | 12 | MIT | 2020-01-20T08:26:35 | 2017-07-26T10:21:15 | Python | UTF-8 | Python | false | false | 3,255 | py | """Aggregation function for CLI specified options and config file options.
This holds the logic that uses the collected and merged config files and
applies the user-specified command-line configuration on top of it.
"""
import logging
from flake8.options import config
LOG = logging.getLogger(__name__)
def aggregate_options(manager, config_finder, arglist=None, values=None):
    """Merge configuration-file options with command-line options.

    Configuration files are parsed first and layered onto the parser
    defaults; the command line is then re-parsed on top of the merged
    defaults so that CLI flags always win.

    :param flake8.options.manager.OptionManager manager:
        The instance of the OptionManager that we're presently using.
    :param flake8.options.config.ConfigFileFinder config_finder:
        The config file finder to use.
    :param list arglist:
        The list of arguments to pass to ``manager.parse_args``. In most cases
        this will be None so ``parse_args`` uses ``sys.argv``. This is mostly
        available to make testing easier.
    :param optparse.Values values:
        Previously parsed set of parsed options.
    :returns:
        Tuple of the parsed options and extra arguments returned by
        ``manager.parse_args``.
    :rtype:
        tuple(optparse.Values, list)
    """
    # Pure option-parser defaults, before any file or CLI input is applied.
    defaults, _ = manager.parse_args([], values=values)

    # A first CLI pass tells us which config file (if any) was requested and
    # whether --isolated was given.
    cli_values, _ = manager.parse_args(arglist)

    # Build the merged view over all discovered configuration files.
    merged_parser = config.MergedConfigParser(
        option_manager=manager,
        config_finder=config_finder,
    )
    file_options = merged_parser.parse(cli_values.config,
                                       cli_values.isolated)

    # Fold the plugin-registered extend-default-ignore entries into the
    # default ignore list.
    ignore_codes = manager.extended_default_ignore.copy()
    LOG.debug('Extended default ignore list: %s',
              list(ignore_codes))
    ignore_codes.update(defaults.ignore)
    defaults.ignore = list(ignore_codes)
    LOG.debug('Merged default ignore list: %s', defaults.ignore)

    select_codes = manager.extended_default_select.copy()
    LOG.debug('Extended default select list: %s',
              list(select_codes))
    defaults.extended_default_select = select_codes

    # Layer each value parsed from the config files over its default.
    for option_name, option_value in file_options.items():
        destination = option_name
        # Config keys do not always match the option destination; when they
        # differ, look the destination up on the registered Option.
        if not hasattr(defaults, option_name):
            destination = merged_parser.config_options[option_name].dest

        LOG.debug('Overriding default value of (%s) for "%s" with (%s)',
                  getattr(defaults, destination, None),
                  destination,
                  option_value)
        setattr(defaults, destination, option_value)

    # Final pass: command-line arguments override everything merged so far.
    return manager.parse_args(arglist, defaults)
| [
"rvojta@me.com"
] | rvojta@me.com |
6d41bc6c5b7e28373bc88fa9ad52239f056dbc2c | 36821b9fcdbefe88a60f584e7d39695ca5fe6177 | /codeforces/1453/A.py | a93ebbcf9a29319282fbae77a215b4aaceb35e41 | [] | no_license | shubham409/CodeSubmits | 231fc40a64ad97323e558ba2fa252c62f34c7809 | 5da4d9cc87d4ac8f54175723c2acf77fc5784f21 | refs/heads/master | 2023-06-26T06:17:30.255973 | 2021-06-03T18:24:00 | 2021-07-29T20:35:01 | 329,399,083 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 304 | py | def fun(ls, lt):
    # A set gives O(1) average-time membership tests against ls.
    st = set(ls)
    count = 0
    # Count how many values of lt also appear in ls.
    for i in lt:
        if(i in st):
            count += 1
    # Emit the answer for this test case.
    print(count)
# Read the number of test cases, then for each case: the sizes (n, k)
# followed by the two lists to intersect.
T = int(input())
for i in range(T):
    n, k = list(map(int, input().split()))
    ls = list(map(int, input().split()))
    lt = list(map(int, input().split()))
fun(ls, lt) | [
"sm091274@gmail.com"
] | sm091274@gmail.com |
58789926c4ec41d87ecb91c85728560a035ea6c8 | 2c3e2d7da1e62bd75229fad0c8e18431a420b8a1 | /tidy_headers/_parse_array.py | ac1f4780b57b533b14d09b060e35fe2b661aa445 | [
"MIT"
] | permissive | ksunden/tidy_headers | 9526c3b522257f9dec4729fcdbcc09e7db68b6b3 | 060942204b5bb87a8b209e81e1b64fd3cbb0691f | refs/heads/master | 2020-03-13T02:55:24.394455 | 2017-11-13T03:08:06 | 2017-11-13T03:08:06 | 130,934,077 | 0 | 0 | null | 2018-04-25T01:34:24 | 2018-04-25T01:34:24 | null | UTF-8 | Python | false | false | 2,092 | py | """Parse array."""
# --- import -------------------------------------------------------------------------------------
import re
import numpy as np
from ._utilities import flatten_list
# --- parse --------------------------------------------------------------------------------------
def array2string(array, sep='\t'):
    """Generate a string from an array with useful formatting.

    Great for writing arrays into single lines in files.

    Parameters
    ----------
    array : numpy.ndarray
        Array to serialize.
    sep : string (optional)
        Separator placed between elements. Default is tab.

    Returns
    -------
    string
        Single-line text representation of *array*.

    See Also
    --------
    string2array
    """
    # Pass threshold per-call instead of mutating the interpreter-wide print
    # options (the old np.set_printoptions call leaked into all later numpy
    # printing). threshold=array.size guarantees no '...' summarization.
    string = np.array2string(array, separator=sep, threshold=array.size)
    # Collapse the multi-line repr onto a single line ...
    string = string.replace('\n', sep)
    # ... and squeeze any run of consecutive separators down to one.
    string = re.sub(r'({})(?=\1)'.format(sep), '', string)
    return string
def string2array(string, sep='\t'):
    """Generate an array from a string created using array2string.

    See Also
    --------
    array2string
    """
    # discover size
    # NOTE(review): counts literal tabs rather than *sep* — presumably only the
    # default tab separator is ever used in practice; confirm before passing sep.
    size = string.count('\t') + 1
    # discover dimensionality
    # Leading '[' characters encode the nesting depth of the serialized array.
    dimensionality = 0
    while string[dimensionality] == '[':
        dimensionality += 1
    # discover shape
    # A row boundary at depth i is marked by (i-1) opening brackets followed by
    # either a space (positive value) or '-' (negative value).
    shape = []
    for i in range(1, dimensionality + 1)[::-1]:
        to_match = '[' * (i - 1)
        count_positive = string.count(to_match + ' ')
        count_negative = string.count(to_match + '-')
        shape.append(count_positive + count_negative)
    # The innermost dimension is whatever remains of the total element count.
    # NOTE(review): assumes dimensionality >= 2 — a 1-D input would index
    # shape[-2] out of range; confirm callers never pass 1-D strings.
    shape[-1] = size / shape[-2]
    for i in range(1, dimensionality - 1)[::-1]:
        shape[i] = shape[i] / shape[i - 1]
    shape = tuple([int(s) for s in shape])
    # import list of floats
    lis = string.split(' ')
    # annoyingly series of negative values get past previous filters
    # NOTE(review): splitting on '-' strips the minus sign and would also break
    # scientific notation such as 1e-05 — verify inputs stay in plain notation.
    lis = flatten_list([i.split('-') for i in lis])
    for i, item in enumerate(lis):
        # Strip bracket and whitespace debris left over from the formatting.
        bad_chars = ['[', ']', '\t', '\n']
        for bad_char in bad_chars:
            item = item.replace(bad_char, '')
        lis[i] = item
    # Walk backwards so pop() does not disturb the indices still to visit;
    # discard any token that is not a parseable number.
    for i in range(len(lis))[::-1]:
        try:
            lis[i] = float(lis[i])
        except ValueError:
            lis.pop(i)
    # create and reshape array
    arr = np.array(lis)
    arr.shape = shape
    # finish
    return arr
| [
"blaise@untzag.com"
] | blaise@untzag.com |
d8bb8e646968f06a0614abc39cd6ba7e62e1df63 | ddd35c693194aefb9c009fe6b88c52de7fa7c444 | /Live 10.1.18/VCM600/VCM600.py | d7805481023c57c96160c4fb4feb1534cdf912e5 | [] | no_license | notelba/midi-remote-scripts | 819372d9c22573877c7912091bd8359fdd42585d | e3ec6846470eed7da8a4d4f78562ed49dc00727b | refs/heads/main | 2022-07-30T00:18:33.296376 | 2020-10-04T00:00:12 | 2020-10-04T00:00:12 | 301,003,961 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,780 | py | # uncompyle6 version 3.7.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.8.5 (default, Aug 12 2020, 00:00:00)
# [GCC 10.2.1 20200723 (Red Hat 10.2.1-1)]
# Embedded file name: c:\Jenkins\live\output\Live\win_64_static\Release\python-bundle\MIDI Remote Scripts\VCM600\VCM600.py
# Compiled at: 2020-07-14 15:33:46
from __future__ import absolute_import, print_function, unicode_literals
import Live
from _Framework.ControlSurface import ControlSurface
from _Framework.InputControlElement import *
from _Framework.SliderElement import SliderElement
from _Framework.ButtonElement import ButtonElement
from _Framework.EncoderElement import EncoderElement
from _Framework.ChannelStripComponent import ChannelStripComponent
from _Framework.DeviceComponent import DeviceComponent
from _Framework.ClipSlotComponent import ClipSlotComponent
from _Framework.SceneComponent import SceneComponent
from _Framework.SessionComponent import SessionComponent
from _Framework.ChannelTranslationSelector import ChannelTranslationSelector
from .ViewTogglerComponent import ViewTogglerComponent
from .MixerComponent import MixerComponent
from .TransportComponent import TransportComponent
NUM_TRACKS = 12
class VCM600(ControlSurface):
    """ Script for Vestax's VCM600 Controller """

    def __init__(self, c_instance):
        ControlSurface.__init__(self, c_instance)
        # Build every sub-component inside the guard so the framework batches
        # the resulting listener/MIDI-map registration.
        with self.component_guard():
            self._setup_session_control()
            self._setup_mixer_control()
            self._setup_device_control()
            self._setup_transport_control()
            self._setup_view_control()

    def _setup_session_control(self):
        """Wire scene navigation/launch and per-track clip stop/launch."""
        is_momentary = True
        scene_down = ButtonElement(is_momentary, MIDI_NOTE_TYPE, 12, 89)
        scene_up = ButtonElement(is_momentary, MIDI_NOTE_TYPE, 12, 90)
        session = SessionComponent(NUM_TRACKS, 0)
        session.set_select_buttons(scene_down, scene_up)
        scene_launch = ButtonElement(is_momentary, MIDI_NOTE_TYPE, 12, 87)
        session.selected_scene().set_launch_button(scene_launch)
        stop_buttons = tuple(ButtonElement(is_momentary, MIDI_NOTE_TYPE, track, 68)
                             for track in range(NUM_TRACKS))
        session.set_stop_track_clip_buttons(stop_buttons)
        for track in range(NUM_TRACKS):
            clip_launch = ButtonElement(is_momentary, MIDI_NOTE_TYPE, track, 69)
            session.selected_scene().clip_slot(track).set_launch_button(clip_launch)

    def _setup_mixer_control(self):
        """Wire the channel strips, the two return strips and the master section."""
        is_momentary = True
        mixer = MixerComponent(NUM_TRACKS, 2)
        for track in range(NUM_TRACKS):
            strip = mixer.channel_strip(track)
            strip.set_volume_control(SliderElement(MIDI_CC_TYPE, track, 23))
            strip.set_pan_control(EncoderElement(MIDI_CC_TYPE, track, 10, Live.MidiMap.MapMode.absolute))
            send_a = EncoderElement(MIDI_CC_TYPE, track, 19, Live.MidiMap.MapMode.absolute)
            send_b = EncoderElement(MIDI_CC_TYPE, track, 20, Live.MidiMap.MapMode.absolute)
            strip.set_send_controls((send_a, send_b))
            strip.set_solo_button(ButtonElement(is_momentary, MIDI_NOTE_TYPE, track, 64))
            strip.set_mute_button(ButtonElement(is_momentary, MIDI_NOTE_TYPE, track, 63))
            strip.set_crossfade_toggle(ButtonElement(is_momentary, MIDI_NOTE_TYPE, track, 65))
            # Three EQ bands per strip: gain encoders on CC 18/17/16 and cut
            # buttons on notes 62/61/60.
            eq = mixer.track_eq(track)
            eq.set_gain_controls(tuple(EncoderElement(MIDI_CC_TYPE, track, 18 - band, Live.MidiMap.MapMode.absolute)
                                       for band in range(3)))
            eq.set_cut_buttons(tuple(ButtonElement(is_momentary, MIDI_NOTE_TYPE, track, 62 - band)
                                     for band in range(3)))
            # Renamed from "filter" to avoid shadowing the builtin.
            track_filter = mixer.track_filter(track)
            track_filter.set_filter_controls(EncoderElement(MIDI_CC_TYPE, track, 22, Live.MidiMap.MapMode.absolute),
                                             EncoderElement(MIDI_CC_TYPE, track, 21, Live.MidiMap.MapMode.absolute))
        for ret_track in range(2):
            return_strip = mixer.return_strip(ret_track)
            return_strip.set_volume_control(SliderElement(MIDI_CC_TYPE, 12, 22 + ret_track))
            return_strip.set_pan_control(EncoderElement(MIDI_CC_TYPE, 12, 20 + ret_track, Live.MidiMap.MapMode.absolute))
            return_strip.set_mute_button(ButtonElement(is_momentary, MIDI_NOTE_TYPE, 12, 78 + ret_track))
        mixer.set_crossfader_control(SliderElement(MIDI_CC_TYPE, 12, 8))
        mixer.set_prehear_volume_control(EncoderElement(MIDI_CC_TYPE, 12, 24, Live.MidiMap.MapMode.absolute))
        mixer.master_strip().set_volume_control(SliderElement(MIDI_CC_TYPE, 12, 7))
        mixer.master_strip().set_pan_control(EncoderElement(MIDI_CC_TYPE, 12, 10, Live.MidiMap.MapMode.absolute))
        return mixer

    def _setup_device_control(self):
        """Wire eight bank buttons and eight parameter encoders to the device component."""
        is_momentary = True
        bank_buttons = []
        param_controls = []
        for index in range(8):
            bank_buttons.append(ButtonElement(is_momentary, MIDI_NOTE_TYPE, 12, 70 + index))
            param_controls.append(EncoderElement(MIDI_CC_TYPE, 12, 12 + index, Live.MidiMap.MapMode.absolute))
        device = DeviceComponent()
        device.set_bank_buttons(tuple(bank_buttons))
        device.set_parameter_controls(tuple(param_controls))
        translation_selector = ChannelTranslationSelector()
        translation_selector.set_controls_to_translate(tuple(param_controls))
        translation_selector.set_mode_buttons(tuple(bank_buttons))
        self.set_device_component(device)

    def _setup_transport_control(self):
        """Wire play/record/nudge/loop/punch buttons and the tempo controls."""
        is_momentary = True
        transport = TransportComponent()
        transport.set_play_button(ButtonElement(is_momentary, MIDI_NOTE_TYPE, 12, 80))
        transport.set_record_button(ButtonElement(is_momentary, MIDI_NOTE_TYPE, 12, 81))
        transport.set_nudge_buttons(ButtonElement(is_momentary, MIDI_NOTE_TYPE, 12, 86),
                                    ButtonElement(is_momentary, MIDI_NOTE_TYPE, 12, 85))
        transport.set_loop_button(ButtonElement(is_momentary, MIDI_NOTE_TYPE, 12, 84))
        transport.set_punch_buttons(ButtonElement(is_momentary, MIDI_NOTE_TYPE, 12, 82),
                                    ButtonElement(is_momentary, MIDI_NOTE_TYPE, 12, 83))
        transport.set_tempo_control(SliderElement(MIDI_CC_TYPE, 12, 26), SliderElement(MIDI_CC_TYPE, 12, 25))

    def _setup_view_control(self):
        """Wire the two per-track banks of view toggle buttons."""
        is_momentary = True
        view = ViewTogglerComponent(NUM_TRACKS)
        buttons_row_one = tuple(ButtonElement(is_momentary, MIDI_NOTE_TYPE, track, 67)
                                for track in range(NUM_TRACKS))
        buttons_row_two = tuple(ButtonElement(is_momentary, MIDI_NOTE_TYPE, track, 66)
                                for track in range(NUM_TRACKS))
        view.set_buttons(buttons_row_one, buttons_row_two)
# okay decompiling /home/deniz/data/projects/midiremote/Live 10.1.18/VCM600/VCM600.pyc
| [
"notelba@example.com"
] | notelba@example.com |
c63fca29896bfff9b615895fc46e9674f2c87b44 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_292/ch79_2020_04_07_15_56_18_513329.py | d012de9b08ecbc09c2525d1628c5a9a84c203598 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | def monta_dicionario(l1,l2):
dicionario = {}
for i in range(len(l1)):
dicionario[l1[i]]=l2[i]
return dicionario | [
"you@example.com"
] | you@example.com |
08959099af5bd095a8dc537ede88a16da5dbe231 | 797e83cd492c22c8b7e456b76ae9efb45e102e30 | /chapter1_A_Sneak_Preview/Step2/dump_db_pickle.py | 2efbafd3db26064e7f0fb1eacb1af56595a51304 | [] | no_license | skyaiolos/ProgrammingPython4th | 013e2c831a6e7836826369d55aa9435fe91c2026 | a6a98077440f5818fb0bd430a8f9a5d8bf0ce6d7 | refs/heads/master | 2021-01-23T11:20:38.292728 | 2017-07-20T03:22:59 | 2017-07-20T03:22:59 | 93,130,254 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 449 | py | import pickle
# dbfile = open('people-pickle', 'rb') # use binary mode files in 3.X
with open('people-pickle', 'rb') as f:
db = pickle.load(f)
for key in db:
print(key, '=>\n ', db[key])
print(db['sue']['name'])
# bob =>
# {'name': 'Bob Smith', 'age': 42, 'pay': 30000, 'job': 'dev'}
# sue =>
# {'name': 'Sue Jones', 'age': 45, 'pay': 40000, 'job': 'hdw'}
# tom =>
# {'name': 'Tom', 'age': 50, 'pay': 0, 'job': None}
# Sue Jones
| [
"skyaiolos@aliyun.com"
] | skyaiolos@aliyun.com |
00c0734af882609c9d0bb4bb27ff77f501034d52 | cdbaec17aa8411a1455b42520154cc9f30da3550 | /Leetcode 5/Pacific Atlantic Water Flow 2.py | 5ad9ed017d084ffd7100e845cc4497821387d475 | [] | no_license | PiyushChaturvedii/My-Leetcode-Solutions-Python- | bad986978a7e72a3fda59b652cda79802377ab2f | 86138195f6f343f0acc97da286f4f4811a0d0e48 | refs/heads/master | 2021-10-09T20:19:11.186191 | 2019-01-03T05:15:33 | 2019-01-03T05:15:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,307 | py | class Solution(object):
def pacificAtlantic(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: List[List[int]]
"""
m = len(matrix)
n = len(matrix[0]) if m else 0
if m * n == 0: return []
topEdge = [(0, y) for y in range(n)]
leftEdge = [(x, 0) for x in range(m)]
pacific = set(topEdge + leftEdge)
bottomEdge = [(m - 1, y) for y in range(n)]
rightEdge = [(x, n - 1) for x in range(m)]
atlantic = set(bottomEdge + rightEdge)
def bfs(vset):
dz = zip((1, 0, -1, 0), (0, 1, 0, -1))
queue = list(vset)
while queue:
hx, hy = queue.pop(0)
for dx, dy in dz:
nx, ny = hx + dx, hy + dy
if 0 <= nx < m and 0 <= ny < n:
if matrix[nx][ny] >= matrix[hx][hy]:
if (nx, ny) not in vset:
queue.append((nx, ny))
vset.add((nx, ny))
bfs(pacific)
bfs(atlantic)
result = pacific & atlantic
return map(list, result)
matrix=[[1,2,2,3,5],[3,2,3,4,4],[2,4,5,3,1],[6,7,1,4,5],[5,1,1,2,4]]
c=Solution().pacificAtlantic(matrix) | [
"noreply@github.com"
] | PiyushChaturvedii.noreply@github.com |
1fbe035cdeff7017e360ea5dbf43f22876d2e3a9 | af7df9d77a2545b54d8cd03e7f4633dce6125f4a | /ch08/viewer-pil.py | a954ddc59ca3ec169dfbc48de4909a0fb22381eb | [] | no_license | socrates77-sh/PP4E | 71e6522ea2e7cfd0c68c1e06ceb4d0716cc0f0bd | c92e69aea50262bfd63e95467ae4baf7cdc2f22f | refs/heads/master | 2020-05-29T08:46:47.380002 | 2018-11-16T10:38:44 | 2018-11-16T10:38:44 | 69,466,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 725 | py | """
show one image with PIL photo replacement object
handles many more image types; install PIL first: placed in Lib\site-packages
"""
import os
import sys
from tkinter import *
from PIL.ImageTk import PhotoImage # <== use PIL replacement class
# rest of code unchanged
imgdir = 'E:\\workspace\\PP4E-Examples-1.2\\Examples\\PP4E\\Gui\\PIL\\images'
imgfile = 'florida-2009-1.jpg' # does gif, jpg, png, tiff, etc.
if len(sys.argv) > 1:
imgfile = sys.argv[1]
imgpath = os.path.join(imgdir, imgfile)
win = Tk()
win.title(imgfile)
imgobj = PhotoImage(file=imgpath) # now JPEGs work!
Label(win, image=imgobj).pack()
win.mainloop()
print(imgobj.width(), imgobj.height()) # show size in pixels on exit
| [
"zhwenrong@sina.com"
] | zhwenrong@sina.com |
4b1f81a7f96f17aceb49489dc87ce9196f26aebb | 8f24e443e42315a81028b648e753c50967c51c78 | /rllib/models/jax/jax_action_dist.py | 864cd065cee6c7efd858dbb23cc9e1fbc01c5e88 | [
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] | permissive | simon-mo/ray | d07efdada8d05c6e10417f96e8dfc35f9ad33397 | 1e42e6cd15e2fb96c217cba8484e59ed0ef4b0c8 | refs/heads/master | 2023-03-06T00:09:35.758834 | 2022-12-23T18:46:48 | 2022-12-23T18:46:48 | 122,156,396 | 4 | 2 | Apache-2.0 | 2023-03-04T08:56:56 | 2018-02-20T04:47:06 | Python | UTF-8 | Python | false | false | 2,423 | py | import time
from ray.rllib.models.action_dist import ActionDistribution
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.utils.annotations import override
from ray.rllib.utils.framework import try_import_jax, try_import_tfp
from ray.rllib.utils.typing import TensorType, List
jax, flax = try_import_jax()
tfp = try_import_tfp()
class JAXDistribution(ActionDistribution):
"""Wrapper class for JAX distributions."""
@override(ActionDistribution)
def __init__(self, inputs: List[TensorType], model: ModelV2):
super().__init__(inputs, model)
# Store the last sample here.
self.last_sample = None
# Use current time as pseudo-random number generator's seed.
self.prng_key = jax.random.PRNGKey(seed=int(time.time()))
@override(ActionDistribution)
def logp(self, actions: TensorType) -> TensorType:
return self.dist.log_prob(actions)
@override(ActionDistribution)
def entropy(self) -> TensorType:
return self.dist.entropy()
@override(ActionDistribution)
def kl(self, other: ActionDistribution) -> TensorType:
return self.dist.kl_divergence(other.dist)
@override(ActionDistribution)
def sample(self) -> TensorType:
# Update the state of our PRNG.
_, self.prng_key = jax.random.split(self.prng_key)
self.last_sample = jax.random.categorical(self.prng_key, self.inputs)
return self.last_sample
@override(ActionDistribution)
def sampled_action_logp(self) -> TensorType:
assert self.last_sample is not None
return self.logp(self.last_sample)
class JAXCategorical(JAXDistribution):
"""Wrapper class for a JAX Categorical distribution."""
@override(ActionDistribution)
def __init__(self, inputs, model=None, temperature=1.0):
if temperature != 1.0:
assert temperature > 0.0, "Categorical `temperature` must be > 0.0!"
inputs /= temperature
super().__init__(inputs, model)
self.dist = tfp.experimental.substrates.jax.distributions.Categorical(
logits=self.inputs
)
@override(ActionDistribution)
def deterministic_sample(self):
self.last_sample = self.inputs.argmax(axis=1)
return self.last_sample
@staticmethod
@override(ActionDistribution)
def required_model_output_shape(action_space, model_config):
return action_space.n
| [
"noreply@github.com"
] | simon-mo.noreply@github.com |
1578d7b129691847b352ad44b707bf582bf35fbd | 673e829dda9583c8dd2ac8d958ba1dc304bffeaf | /data/multilingual/Latn.TOP/Sun-ExtA_12/pdf_to_json_test_Latn.TOP_Sun-ExtA_12.py | e426ed44c203532496b95fe8493f518f68f56d58 | [
"BSD-3-Clause"
] | permissive | antoinecarme/pdf_to_json_tests | 58bab9f6ba263531e69f793233ddc4d33b783b7e | d57a024fde862e698d916a1178f285883d7a3b2f | refs/heads/master | 2021-01-26T08:41:47.327804 | 2020-02-27T15:54:48 | 2020-02-27T15:54:48 | 243,359,934 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.TOP/Sun-ExtA_12/udhr_Latn.TOP_Sun-ExtA_12.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
9e8c98d739ff55d5f3cdf1c3ed99ca911570979e | 7b750c5c9df2fb05e92b16a43767c444404de7ae | /src/leetcode/python3/leetcode5.py | 857e394520e476e114ce1ce401e547ae69f40be6 | [] | no_license | renaissance-codes/leetcode | a68c0203fe4f006fa250122614079adfe6582d78 | de6db120a1e709809d26e3e317c66612e681fb70 | refs/heads/master | 2022-08-18T15:05:19.622014 | 2022-08-05T03:34:01 | 2022-08-05T03:34:01 | 200,180,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,032 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
寻找最长回文字符串
"""
# 比较朴素的思路,使用额外空间存储短字符串是否是回文字符串,时间5968ms, 效率不够高
class Solution:
def longestPalindrome(self, s: str) -> str:
if len(s) < 2:
return s
s_metric = [[1 if i == j else 0 for j in range(len(s))] for i in range(len(s))]
longest_s = s[0]
longest_len = 1
while len(s) - longest_len:
for i in range(len(s) - longest_len):
if longest_len == 1:
if s[i] == s[i + longest_len]:
s_metric[i][i + longest_len] = 1
longest_s = s[i:i + longest_len + 1]
else:
if s_metric[i + 1][i + longest_len - 1] and s[i] == s[i + longest_len]:
s_metric[i][i + longest_len] = 1
longest_s = s[i:i + longest_len + 1]
longest_len += 1
return longest_s
| [
"jack.li@eisoo.com"
] | jack.li@eisoo.com |
5f518d5a2d3485884b423ab1d9f6a7b2e6acd87b | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03200/s568843924.py | 40d7ef36da7f147dcae37fe4fd5f54e43d59e969 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | s = list(input())
len_s = len(s)
len_b = s.count('B')
b_index_list = []
for i in range(len_s):
if s[i] == 'B':
b_index_list.append(i)
b_index_list.reverse()
l0 = len_s - 1
cnt = 0
for b in b_index_list:
cnt += l0 - b
l0 -= 1
print(cnt)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
7dd7415ee5474dea20b15224b3b981dc2bb0b6cc | 33f32d78087491e989289c46e5d2df5400e23946 | /leetcode/Unsorted_Algorthm_Problems/Split_a_String_in_Balanced_Strings.py | f69cac1b2923e15b2c29e2164c5e53af3d96043f | [] | no_license | xulleon/algorithm | 1b421989423640a44339e6edb21c054b6eb47a30 | b1f93854006a9b1e1afa4aadf80006551d492f8a | refs/heads/master | 2022-10-08T19:54:18.123628 | 2022-09-29T05:05:23 | 2022-09-29T05:05:23 | 146,042,161 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | # https://leetcode.com/problems/split-a-string-in-balanced-strings/
class Solution:
def balancedStringSplit(self, s: str) -> int:
# variable labled assume start with R. RLLLLRRRLR
count, l, r, = 0, 0, 0
for char in s:
if char == 'R':
r += 1
else:
l += 1
if r == l:
count += 1
return count
| [
"leonxu@yahoo.com"
] | leonxu@yahoo.com |
e17020abef3c21e15e8849965d0e461d1633248a | ffcd795f30483a19d2717f08b1aaf59a7fd4fd7e | /Math Quiz.py | 6f82a81c20f6e0ab747e6c4a7bd8755010a2179d | [] | no_license | Botany-Downs-Secondary-College/mathsquiz-simonbargh | 5791b4810790128878e7cd28678c3d4af3beb07d | d5aba85e9d522248827301130976fe5d5a45e11a | refs/heads/main | 2023-03-13T18:05:25.291569 | 2021-02-22T21:28:58 | 2021-02-22T21:28:58 | 337,539,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,402 | py | from tkinter import *
from tkinter import ttk
import random
import time
class Welcome:
def __init__(self, parent):
def Questions():
# Checking whether the user's entry details meet the requirements
'''def show_questions():
try:
if self.NameEntry.get() == "":
self.EntryErrorLabel.configure(text = "Enter your name.")
self.NameEntry.focus()
except ValueError:
self.EntryErrorLabel.configure(text = "Enter your age as a number.")
self.AgeEntry.delete(0, END)
self.AgeEntry.focus()'''
if len(self.NameEntry.get()) >= 1:
if len(self.AgeEntry.get()) >= 1: # and self.AgeEntry.get() is not int:
if clicked.get() == "Easy" or clicked.get() == "Medium" or clicked.get() == "Hard":
frames = Quiz(root)
self.Welcome.grid_forget()
else:
self.EntryErrorLabel.configure(text = "Choose a difficulty level.")
else:
self.EntryErrorLabel.configure(text = "Enter your age.")
else:
self.EntryErrorLabel.configure(text = "Enter your name.")
# Welcome Frame
self.Welcome = Frame(parent)
self.Welcome.grid(row = 0, column = 0)
self.TitleLabel = Label(self.Welcome, text = "Welcome to Maths Quiz!", bg = "lightblue", fg = "blue", width = 24, padx = 30, pady = 10, font = ("Time", "12", "italic", "bold"))
self.TitleLabel.grid(columnspan = 2)
self.NextButton = ttk.Button(self.Welcome, text = "Next", command = Questions)
self.NextButton.grid(row = 5, column = 1, pady = 10)
# Name Label
self.NameLabel = Label(self.Welcome, text = "Name", anchor = W, fg = "black", width = 10, padx = 30, pady = 10, font = ("Time", "12", "bold"))
self.NameLabel.grid(row = 2, column = 0)
# Age Label
self.AgeLabel = Label(self.Welcome, text = "Age", anchor = W, fg = "black", width = 10, padx = 30, pady = 10, font = ("Time", "12", "bold"))
self.AgeLabel.grid(row = 3, column = 0)
# Name Entry
self.NameEntry = ttk.Entry(self.Welcome, width = 20)
self.NameEntry.grid(row = 2, column = 1, columnspan = 2)
# Age Entry
self.AgeEntry = ttk.Entry(self.Welcome, width = 20)
self.AgeEntry.grid(row = 3, column = 1, columnspan = 2)
# Difficulty Level
self.DifficultyLabel = Label(self.Welcome, text = "Difficulty Level", anchor = W, fg = "black", width = 10, padx = 30, pady = 10, font = ("Time", "12", "bold"))
self.DifficultyLabel.grid(row = 4, column = 0)
# Difficulty Options
options = ["Easy", "Medium", "Hard"]
clicked = StringVar()
clicked.set("Select an Option")
diff_level = OptionMenu(self.Welcome, clicked, *options)
diff_level.grid(row = 4, column = 1)
# Warning Error Label
self.EntryErrorLabel = Label(self.Welcome, text = "", fg = "red", width = 10, padx = 50, pady = 10)
self.EntryErrorLabel.grid(row = 6, column = 0, columnspan = 2)
class Quiz:
def __init__(self, parent):
def Welcome_Page():
frames = Welcome(root)
self.Quiz.grid_forget()
# Quiz Frame
self.Quiz = Frame(parent)
self.Quiz.grid(row = 0, column = 0)
self.TitleLabel = Label(self.Quiz, text = "Questions", bg = "lightblue", fg = "black", width = 20, padx = 30, pady = 10)
self.TitleLabel.grid(columnspan = 2)
self.BackButton = ttk.Button(self.Quiz, text = "Back", command = Welcome_Page)
self.BackButton.grid(row = 8, column = 1, pady = 10)
if __name__ == "__main__":
root = Tk()
frames = Welcome(root)
root.title("Quiz")
root.mainloop()
| [
"noreply@github.com"
] | Botany-Downs-Secondary-College.noreply@github.com |
8221804a8b71f27558952a6fff2ea180d901387e | 0e1a0329e1b96405d3ba8426fd4f935aa4d8b04b | /scraper/merge.py | 15c94f3e038d5c181e3f5898d9c5efcb34e92473 | [] | no_license | ugik/Blitz | 6e3623a4a03309e33dcc0b312800e8cadc26d28c | 740f65ecaab86567df31d6a0055867be193afc3d | refs/heads/master | 2021-05-03T20:15:20.516014 | 2015-03-11T12:33:34 | 2015-03-11T12:33:34 | 25,015,963 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,418 | py | import xlrd, xlwt
import glob, os.path
def merge_xls (in_dir="./", out_file="merged_output.xls"):
xls_files = glob.glob(in_dir + "*.xls")
sheet_names = [os.path.basename(v)[:-4] for v in xls_files]
sheet_excl = [os.path.basename(v)[:-4] for v in xls_files if
len(os.path.basename(v)[:-4]) > 29]
merged_book = xlwt.Workbook()
if in_dir[-1:] != "/": in_dir = in_dir + "/"
xls_files.sort()
if xls_files:
for k, xls_file in enumerate(xls_files):
print "---> Processing file %s" % (xls_file)
if len (sheet_names[k]) <= 29:
book = xlrd.open_workbook(xls_file)
if book.nsheets == 1:
ws = merged_book.add_sheet(sheet_names[k])
sheet = book.sheet_by_index(0)
for rx in range(sheet.nrows):
for cx in range(sheet.ncols):
ws.write(rx, cx, sheet.cell_value(rx, cx))
elif book.nsheets in range(2, 100):
for sheetx in range(book.nsheets):
sheet0n = sheet_names[k]+str(sheetx+1).zfill(2)
ws = merged_book.add_sheet(sheet0n)
sheet = book.sheet_by_index(sheetx)
for rx in range(sheet.nrows):
for cx in range(sheet.ncols):
ws.write(rx, cx, sheet.cell_value(rx, cx))
else:
print "ERROR *** File %s has %s sheets (maximum is 99)"
% (xls_file, book.nsheets)
raise
else:
print "WARNING *** File name too long: <%s.xls> (maximum is
29 chars) " % (sheet_names[k])
print "WARNING *** File <%s.xls> was skipped." %
(sheet_names[k])
merged_book.save(out_file)
print
print "---> Merged xls file written to %s using the following source
files: " % (out_file)
for k, v in enumerate(sheet_names):
if len(v) <= 29:
print "\t", str(k+1).zfill(3), "%s.xls" % (v)
print
if sheet_excl:
print "--> The following files were skipped because the file
name exceeds 29 characters: "
for k, v in enumerate(sheet_excl):
print "\t", str(k+1).zfill(3), v
else:
print "NOTE *** No xls files in %s. Nothing to do." % (in_dir)
| [
"georgek@gmail.com"
] | georgek@gmail.com |
4f774788f282a41dee2b12ddc84f99a3129f478b | d1a8d250cf6e3e61f90b5d122e389d8488a9ff8c | /Travel/urls.py | 92e0a1b5647b5b2c19167ecdbde5cdc9f7fe538a | [] | no_license | dusty-g/Travel | 89501d156a3ea86f4478f7bb41e1f968c7087bd3 | 5ca6061884e7630a0b0365adfa640da3ce1a6c37 | refs/heads/master | 2021-01-20T02:23:18.066638 | 2017-04-30T19:00:56 | 2017-04-30T19:00:56 | 89,401,516 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 936 | py | """Travel URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
urlpatterns = [
url(r'^', include('apps.users_app.urls', namespace='users')),
url(r'^travels/', include('apps.trips_app.urls', namespace='travels')),
url(r'^destination/', include('apps.destinations_app.urls', namespace='destinations')),
]
| [
"dustygalindo@gmail.com"
] | dustygalindo@gmail.com |
f1c2edcac27a6ce135430dbd9554ede1eecf2db1 | caeec99c6a0e7d0dd625891c5adacd23ff311892 | /trunk/config.py.sample | 797bfa19df19a436c7f0371bef0150aa14ea9801 | [] | no_license | BGCX067/faccbk-svn-to-git | fad0e44a3ce675d390751a4ff4cc8afbe9a4ebe8 | 82a5a801a9a2e19a2a72cbbdce0324a42ad699a4 | refs/heads/master | 2016-09-01T08:53:29.582184 | 2015-12-28T14:36:39 | 2015-12-28T14:36:39 | 48,699,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,867 | sample | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Created on Aug 15, 2010
@author: Wang Yuanyi
'''
#please change follow 2 row by your family numbers google account
Admin = '@gmail.com'
Users = ['@gmail.com','@gmail.com']
TEST = False
from wiwikai.faccbk import TransPurposeCategory, TransAccount, Payee, \
trans_type_expense, trans_type_income, trans_account_type_credit_card, \
trans_account_type_debit_card
import os
server_software = os.environ['SERVER_SOFTWARE']
DEVELOPMENT = False
if server_software.startswith('Development'):
DEVELOPMENT = True
TEST = True
if DEVELOPMENT == True:
Admin = 'test@example.com'
Users = ['test@example.com']
if TEST:
def insert_trans_purpose_category(ptitle, ptrans_type):
transTargetCtg = TransPurposeCategory(title = ptitle, trans_type = ptrans_type )
transTargetCtg.put()
def insert_trans_account(plastnumber, ptrans_account_type, pbank_name, pstatement_date, ppayment_due_date):
creditCard = TransAccount(last4number = plastnumber, type=ptrans_account_type, bank_name = pbank_name, statement_date = pstatement_date, payment_due_date =ppayment_due_date )
creditCard.put()
def insert_payee(payee_title):
payee = Payee(title = payee_title)
payee.put()
if TransPurposeCategory.all().count() == 0:
insert_trans_purpose_category(u"家庭食物支出", trans_type_expense)
insert_trans_purpose_category(u"工资收入", trans_type_income)
if TransAccount.all().count() == 0:
insert_trans_account('8888', trans_account_type_credit_card, 'ICBC', 20, 8)
insert_trans_account('7777', trans_account_type_debit_card, 'JBC', 25, 15)
if Payee.all().count() == 0:
insert_payee(u'孩子')
insert_payee(u'老婆')
insert_payee(u'自己') | [
"you@example.com"
] | you@example.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.