blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
80a8ad40352c21f370fe34ef623717fcd0fb0e12 | a6894d17fdbceb56d4364f0e279d03b16a181396 | /working-env/lib/python2.5/TurboGears-1.0.2.2-py2.5.egg/turbogears/i18n/data/gl_ES.py | cd66c622a6504223bf630350451cb5556c01c232 | [] | no_license | thraxil/gtreed | c1c5a19178c1f50ff5e61887b13ff7b004da1d25 | ca228848364edb204b15a7411fd6192379781c78 | refs/heads/master | 2020-04-18T03:02:15.468044 | 2008-12-10T20:02:12 | 2008-12-10T20:02:12 | 88,388 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 887 | py | # Formatting configuration for locale gl_ES
languages={'gl': 'galego'}
countries={'ES': u'Espa\xf1a'}
months=['Xaneiro', 'Febreiro', 'Marzo', 'Abril', 'Maio', u'Xu\xf1o', 'Xullo', 'Agosto', 'Setembro', 'Outubro', 'Novembro', 'Decembro']
abbrMonths=['Xan', 'Feb', 'Mar', 'Abr', 'Mai', u'Xu\xf1', 'Xul', 'Ago', 'Set', 'Out', 'Nov', 'Dec']
days=['Luns', 'Martes', u'M\xe9rcores', 'Xoves', 'Venres', u'S\xe1bado', 'Domingo']
abbrDays=['Lun', 'Mar', u'M\xe9r', 'Xov', 'Ven', u'S\xe1b', 'Dom']
dateFormats={'medium': '%%(abbrmonthname)s %d,%y', 'full': '%%(dayname)s %d %%(monthname)s %Y', 'long': '%d %%(monthname)s %Y', 'short': '%d/%m/%y'}
numericSymbols={'group': '.', 'nativeZeroDigit': '0', 'exponential': 'E', 'perMille': u'\u2030', 'nan': u'\ufffd', 'decimal': ',', 'percentSign': '%', 'list': ';', 'patternDigit': '#', 'plusSign': '+', 'infinity': u'\u221e', 'minusSign': '-'} | [
"anders@columbia.edu"
] | anders@columbia.edu |
234fe0b3f16df5e57da1016934fe2f8075019def | 2a31c353c77acc1f134780d81d1be9d7eedfaf75 | /sampleapp/urls.py | 34eb58e25b776a19ee4244acc8478f05ce18ee7d | [] | no_license | CCCodes/Simple-Django-App | 9eb43cd1eca40f031fa4f592a97a632fe892521b | f65f2fecaaf352f01dd9ee6580fc017024eff6bd | refs/heads/master | 2020-03-22T02:35:27.957759 | 2018-07-02T02:23:32 | 2018-07-02T02:23:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 505 | py | from django.conf.urls import url
from django.contrib import admin
from django.urls import path, include
from sampleapp import views
app_name = "sampleapp"  # URL namespace, e.g. reverse('sampleapp:login')
# Route table: Django admin plus the app's login/select/output views.
urlpatterns = [
    path('admin/', admin.site.urls),
    url(r'^login/$', views.login, name='login'),
    url(r'^login_failed/$', views.login_failed, name='login_failed'),
    url(r'^login_submit/$', views.login_submit, name='login_submit'),
    url(r'^select/$', views.select, name='select'),
    url(r'^output/$', views.output, name='output')
]
| [
"caitlinchou@gmail.com"
] | caitlinchou@gmail.com |
d3f138c72c0428b9ca0d2101a5a8994f16946352 | 19b9b6062c491060a63078b5b9947deb4e4d132b | /Checker/SignalCheck.py | 2cc1b065fafc01542c5779027af26776930e7c0d | [] | no_license | JefferyPaul/StrategyAnalyzer | bcd8baed306ab68f829143025db40178181d229b | 2c7dfc8d615cc8433df0b1108ecbb39b929e0ddc | refs/heads/master | 2020-04-07T20:11:41.002909 | 2018-09-10T01:04:39 | 2018-09-10T01:04:39 | 158,678,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,337 | py | import os
import pandas as pd
from datetime import *
from Checker.getConfig import get_config
from DataManager import get_data_path
from Shower.BandShower import BandShower
from Shower.TargetPositionShower import TargetPositionShower
def set_start_date(str_start):
    """Parse a 'YYYY-MM-DD' string; an empty string maps to 2000-01-01."""
    if len(str_start) == 0:
        return datetime(2000, 1, 1)
    return datetime.strptime(str_start, "%Y-%m-%d")
def set_end_date(str_end):
    """Parse a 'YYYY-MM-DD' string; an empty string maps to 2020-01-01."""
    if len(str_end) == 0:
        return datetime(2020, 1, 1)
    return datetime.strptime(str_end, "%Y-%m-%d")
'''
1 SimulationSignals VS SimulationSignals
2 SimulationSignals VS liveSignals
仅在 Trader-Signal.py中读取文件时的文件名有区分,数据结构和对比流程不存在区别
'''
if __name__ == '__main__':
    #
    # 1 参数获取
    # (Step 1: read all run parameters from the checker configuration.)
    dict_config = get_config()
    path_input_folder = dict_config["path_input_folder"]
    path_output_folder = dict_config["path_output_folder"]
    list_strategies = dict_config["strategies"]
    list_traders = dict_config["traders"]
    compare_match_mode = dict_config["compare_match_mode"]
    show_mode = dict_config["show_mode"]
    compare_mode = dict_config["compare_mode"]
    dt_round_level = dict_config["dt_round_level"]
    position_normal_or_std = dict_config["position_normal_or_std"]
    # Dates may arrive either as "YYYY-MM-DD" strings or as datetime objects.
    if type(dict_config["start_date"]) == str:
        start_date = set_start_date(dict_config["start_date"])
    else:
        start_date = dict_config["start_date"]
    if type(dict_config["end_date"]) == str:
        end_date = set_end_date((dict_config["end_date"]))
    else:
        end_date = dict_config["end_date"]
    # 2 获取所需对比的策略的数据所在的目录
    # (Step 2: locate the data directories of the strategies/traders to compare.)
    df_data_file_path = pd.DataFrame(
        get_data_path(list_strategies, list_traders, path_input_folder))
    print(df_data_file_path["Path"])
    # 3 获取并整理数据
    # 4 画图展示
    # (Steps 3-4: load/arrange the data and render the comparison charts.)
    py_start_time_t = datetime.now().strftime("%H:%M:%S")
    py_start_time = datetime.now().strftime("%Y%m%d_%H%M%S")
    print(py_start_time)
    '''
    根据compare需求,compare_match_mode分为:
    1 不同strategy 同trader ticker比较
    2 同strategy 不同trader ticker比较
    3 不配对,逐一显示
    '''
    # compare_match_mode: "1" groups by trader (different strategies, same
    # trader); "2" groups by strategy (same strategy, different traders);
    # "3" shows every strategy/trader pair individually (no matching).
    if compare_match_mode == "1":
        df_data_file_path_gb = df_data_file_path.groupby("TraderA")
    elif compare_match_mode == "2":
        df_data_file_path_gb = df_data_file_path.groupby("Strategy")
    elif compare_match_mode == "3":
        df_data_file_path_gb = df_data_file_path.groupby("strategy_traderA")
    else:
        print("compare_match_mode is Wrong, changed in mode '3'")
        df_data_file_path_gb = df_data_file_path.groupby("strategy_traderA")
    # 遍历所有对比项
    # (Iterate over every comparison group.)
    for invar_item, df_data_file_path_i in df_data_file_path_gb:
        '''
        show_mode分为:
        1 TargetPosition 对比
        2 Band 对比
        3 Both
        compare_mode分为:
        Single : 单独一个trader也展示--展示所有path中的内容;
        Compare: 仅当path中存在多于或等于2个 相同类型的trader时才展示,用于对比,不对比数据不足的内容。
        '''
        # show_mode: "1" target-position charts, "2" band charts, "3" both.
        # compare_mode "2" renders single traders too; otherwise only groups
        # with enough comparable traders are rendered (see docstring above).
        if show_mode == "1" or show_mode == "3":
            tp = TargetPositionShower(invar_item, df_data_file_path_i, start_date, end_date, dt_round_level,
                                      position_normal_or_std)
            if compare_mode == "2":
                grid = tp.show_target_position("Single")
            else:
                grid = tp.show_target_position("Compare")
            # An empty string is the shower's "nothing to plot" sentinel.
            # NOTE(review): this `continue` also skips the Band section below
            # when show_mode == "3" -- confirm that is intended.
            if grid == "":
                continue
            output_path_folder = r"%s/%s" % (path_output_folder, py_start_time)
            if not os.path.exists(output_path_folder):
                os.mkdir(output_path_folder)
            grid.render(
                r"%s/%s-targetPosition.html" % (
                    output_path_folder,
                    invar_item)
            )
            print(" %s - TargetPosition Done " % invar_item)
        if show_mode == "2" or show_mode == "3":
            tp = BandShower(invar_item, df_data_file_path_i, start_date, end_date, dt_round_level)
            # NOTE(review): "Signal" here vs "Single" above -- possible typo;
            # confirm against the BandShower.show_band API.
            if compare_mode == "2":
                grid = tp.show_band("Signal")
            else:
                grid = tp.show_band("Compare")
            if grid == "":
                continue
            output_path_folder = r"%s/%s" % (path_output_folder, py_start_time)
            if not os.path.exists(output_path_folder):
                os.mkdir(output_path_folder)
            grid.render(
                r"%s/%s-band.html" % (
                    output_path_folder,
                    invar_item)
            )
            print(" %s - band Done " % invar_item)
    print(" Start at : %s " % py_start_time_t)
    print(" Finished at : %s " % datetime.now().strftime("%H:%M:%S"))
    print(" ALL FINISHED")
| [
"595837423@qq.com"
] | 595837423@qq.com |
e3f22ef6f52e667dd7bb0c49d12ce580026b23a1 | 56a0762c741bcac3ab1172eb6114a9e59a48a5df | /mensajes/urls.py | c121166b0614ecbba8e0d369dfd323fc6757e6d3 | [
"MIT"
] | permissive | jjmartinr01/gauss3 | 54af1735a035a566f237d8e0fd9a6fe4447845a2 | 41a23d35c763890d8f729c9d63ac073673689400 | refs/heads/master | 2023-08-23T06:40:51.033857 | 2023-08-08T11:50:50 | 2023-08-08T11:50:50 | 171,710,013 | 1 | 0 | MIT | 2023-02-15T18:43:56 | 2019-02-20T16:35:03 | HTML | UTF-8 | Python | false | false | 520 | py | # -*- coding: utf-8 -*-
from django.urls import path
from . import views
# Message-centre routes; every handler lives in mensajes.views.
urlpatterns = [
    path('correo/', views.correo),
    path('responder_mensaje/', views.responder_mensaje),
    path('mensaje_importante/', views.mensaje_importante),
    path('enviados/', views.enviados),
    path('recibidos/', views.recibidos),
    path('ajax_mensajes/', views.ajax_mensajes),
    path('borrar_avisos/', views.borrar_avisos),
    path('get_avisos/', views.get_avisos),
    path('redactar_mensaje/', views.redactar_mensaje),
]
| [
"jmar0269@gmail.com"
] | jmar0269@gmail.com |
fbeb3df0ad93d859ac3ddaaaa8cf30f6e5c85e3f | f3bd271bf00325881fb5b2533b9ef7f7448a75ec | /classes/_torsion5.py | 83b99fba600d303db2edd39535fc5154b743f3b9 | [] | no_license | obaica/xcp2k | 7f99fc9d494859e16b9b0ea8e217b0493f4b2f59 | 6e15c2c95658f545102595dc1783f5e03a9e6916 | refs/heads/master | 2020-07-15T17:27:43.378835 | 2019-02-11T16:32:24 | 2019-02-11T16:32:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 967 | py | from xcp2k.inputsection import InputSection
from _point39 import _point39
class _torsion5(InputSection):
    """CP2K ``TORSION`` input section holding repeatable ``POINT`` subsections."""

    def __init__(self):
        InputSection.__init__(self)
        self.Atoms = None
        self.POINT_list = []
        self._name = "TORSION"
        self._keywords = {'Atoms': 'ATOMS'}
        self._repeated_subsections = {'POINT': '_point39'}
        self._aliases = {'Points': 'Atoms'}
        self._attributes = ['POINT_list']

    def POINT_add(self, section_parameters=None):
        """Append a new ``_point39`` subsection, optionally seeding its parameters."""
        section = _point39()
        if section_parameters is not None and hasattr(section, 'Section_parameters'):
            section.Section_parameters = section_parameters
        self.POINT_list.append(section)
        return section

    @property
    def Points(self):
        """Alias for ``Atoms`` (see that attribute's documentation)."""
        return self.Atoms

    @Points.setter
    def Points(self, value):
        self.Atoms = value
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
b23442ddc1decb7f8b5ac16595474fb0958b92f7 | 26605ec8a8bdd64a45af7d444d097d9e2f832dc9 | /electrum_xazab/plugins/keepkey/cmdline.py | d9c0b09fa26ff935158f1a8c786bcf62bd7adc2b | [
"MIT"
] | permissive | nunumichael/electrum-xazab | b67f821fd4a19e924d8ad902f076223df9b7511f | f128c765f451b418a418f9cd8b8e24fd8f66df74 | refs/heads/master | 2023-05-05T05:30:03.935745 | 2021-05-26T19:12:47 | 2021-05-26T19:12:47 | 370,091,240 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | from electrum_xazab.plugin import hook
from .keepkey import KeepKeyPlugin
from ..hw_wallet import CmdLineHandler
class Plugin(KeepKeyPlugin):
    """Command-line flavour of the KeepKey plugin: all user interaction is
    routed through one shared CmdLineHandler instead of a GUI handler."""
    handler = CmdLineHandler()

    @hook
    def init_keystore(self, keystore):
        # Attach the CLI handler only to keystores managed by this plugin.
        if not isinstance(keystore, self.keystore_class):
            return
        keystore.handler = self.handler

    def create_handler(self, window):
        # The same shared handler is returned regardless of the window.
        return self.handler
| [
"71531505+xazab@users.noreply.github.com"
] | 71531505+xazab@users.noreply.github.com |
a61fb1cc5cb3816c0998f5905ac5942446afa481 | f246b414cce8687d6e5d1bb77cd94132b89580a2 | /commerce/auctions/migrations/0001_initial.py | 2a4e4b16150603161e70e2399aa01a2c0e0217c6 | [] | no_license | thewolfcommander/cs50-web | edbccd29b0b649852c7af73d5ecba4f51fa47ad3 | beead0967d36ef398b699601c8ebae646827556d | refs/heads/master | 2022-12-23T08:03:52.729833 | 2020-10-01T14:14:31 | 2020-10-01T14:14:31 | 300,309,278 | 2 | 2 | null | 2020-10-01T14:29:44 | 2020-10-01T14:29:42 | null | UTF-8 | Python | false | false | 5,777 | py | # Generated by Django 3.1 on 2020-09-06 02:37
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='Listing',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200, verbose_name='Title')),
('description', models.TextField(verbose_name='Description')),
('price', models.DecimalField(decimal_places=2, max_digits=15, verbose_name='Starting Bid')),
('image', models.URLField(blank=True, null=True, verbose_name='Image URL')),
('category', models.CharField(blank=True, choices=[('BOOKS', 'Books'), ('MUSIC', 'Music'), ('MOVIES', 'Movies'), ('GAMES', 'Games'), ('COMPUTERS', 'Computers'), ('ELECTRONICS', 'Electronics'), ('KITCHEN', 'Kitchen'), ('HOME', 'Home'), ('HEALTH', 'Health'), ('PETS', 'Pets'), ('TOYS', 'Toys'), ('FASHION', 'Fashion'), ('SHOES', 'Shoes'), ('SPORTS', 'Sports'), ('BABY', 'Baby'), ('TRAVEL', 'Travel')], max_length=200, null=True, verbose_name='Category')),
('creator', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='listings', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.TextField(default='', verbose_name='Comment')),
('timestamp', models.DateTimeField(auto_now_add=True, null=True)),
('commenter', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='comments', to=settings.AUTH_USER_MODEL)),
('listing', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='auctions.listing')),
],
),
migrations.CreateModel(
name='Bid',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('timestamp', models.DateTimeField(auto_now_add=True, null=True)),
('bid_price', models.DecimalField(decimal_places=2, max_digits=15, null=True, verbose_name='Bid Price')),
('bidder', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='bids', to=settings.AUTH_USER_MODEL)),
('listing', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='bids', to='auctions.listing')),
],
),
migrations.AddField(
model_name='user',
name='watchlist',
field=models.ManyToManyField(blank=True, related_name='watchlist', to='auctions.Listing'),
),
]
| [
"nstu778@aucklanduni.ac.nz"
] | nstu778@aucklanduni.ac.nz |
6b4bbf999b1e3971e8818a0b461d50577e4be523 | 3803bbc41c561b80d3ff6b79d45f00a29d868706 | /src/13ionetcdf/abinit.src | 51df8130fc9ba981594670600ac33c9a5c41ecc4 | [] | no_license | RADI-ux/abinit-cmake | 820cc3d6887b9e57b515d493c1a4cdc55e646dea | 54f0edb964b4f0153db532c7687db10e70ea80e2 | refs/heads/master | 2020-09-05T05:05:09.372589 | 2009-02-22T20:59:57 | 2009-02-22T22:43:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | src | # -*- Python -*-
#
# Copyright (c) 2005-2008 ABINIT Group (Yann Pouillon)
# All rights reserved.
#
# This file is part of the ABINIT software package. For license information,
# please see the COPYING file in the top-level directory of the ABINIT source
# distribution.
#
#
# Source files making up the 3ionetcdf library
#
# Source file attributes
ABI_SRC_NIL = 0 # No attribute
ABI_SRC_BLT = 1 # The file is built by a script
ABI_SRC_DEP = 2 # The file depends on other files
# Source files
sources = [
"abi_etsf_init.F90",
"abi_etsf_electrons_put.F90",
"abi_etsf_geo_put.F90",
"handle_err_netcdf.F90",
"hdr_io_etsf.F90",
"ini_wf_etsf.F90",
"ini_wf_netcdf.F90",
"write_header_moldynnetcdf.F90",
"write_moldynvaluenetcdf.F90"]
| [
"ondrej@certik.cz"
] | ondrej@certik.cz |
d6caa39c10d52e910422dd0d943485dcab3e9a75 | e5799f58d30361dd783f2932474c86cb7b0bbab0 | /calculator/views.py | 7a272276e19d3f6cd0deeeca93028fab19865b45 | [] | no_license | Shekharnunia/simple-calculator | a28aacfede97e9022bc7e9e59b278b8e62a681a3 | 006c8196acd5284e28cbe451ab5a5b0314975378 | refs/heads/master | 2022-11-18T05:43:32.816169 | 2020-07-20T11:28:51 | 2020-07-20T11:28:51 | 281,097,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,328 | py | import operator
from django.shortcuts import render
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
class CalculatorAPIView(APIView):
    """Stateless calculator endpoint.

    GET (query string) and POST (request body) both accept:
        value1, value2 -- integer operands
        operation      -- '+', '-' or '*' ('add' also accepted for backward
                          compatibility with existing GET clients)

    Returns the integer result with HTTP 200, or HTTP 400 when an operand is
    missing/non-numeric or the operation is unknown (the original crashed
    with a 500 on missing operands and used inconsistent operation tokens
    between GET and POST).
    """

    # Maps an operation token to the function implementing it.
    _OPS = {
        '+': operator.add,
        'add': operator.add,  # legacy spelling used by the old GET handler
        '-': operator.sub,
        '*': operator.mul,
    }

    def _calculate(self, value1, value2, operation):
        # Shared by get() and post(); returns a DRF Response.
        try:
            left = int(value1)
            right = int(value2)
        except (TypeError, ValueError):
            # Missing or non-numeric operands -> client error, not a crash.
            return Response(status=status.HTTP_400_BAD_REQUEST)
        func = self._OPS.get(operation)
        if func is None:
            return Response(status=status.HTTP_400_BAD_REQUEST)
        return Response(func(left, right), status=status.HTTP_200_OK)

    def post(self, request, *args, **kwargs):
        """Compute value1 <operation> value2 from the request body."""
        return self._calculate(
            request.data.get("value1"),
            request.data.get("value2"),
            request.data.get("operation", 1),
        )

    def get(self, request, format=False):
        """Compute value1 <operation> value2 from the query string."""
        return self._calculate(
            request.GET.get("value1"),
            request.GET.get("value2"),
            request.GET.get("operation", 1),
        )
| [
"shekharnunia@gmail.com"
] | shekharnunia@gmail.com |
f66ad320b398c7d248337244ae82393096b99540 | 268c588de53d48f2e48c694535e27c1be104229d | /Adapter_Pattern.py | b5fd93232e521c89f70478c8b19fe8abe2a22854 | [] | no_license | wax8280/Python_Design_Patterns | def64b1662924807946a9847ac1bf0437382a716 | 88fb08ad3605fb06166bf45d814f5b85a37364b5 | refs/heads/master | 2021-01-11T01:21:14.964828 | 2016-10-14T15:40:42 | 2016-10-14T15:40:42 | 70,715,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,160 | py | # coding:utf-8
class Synthesizer:
    """An electronic synthesizer identified by its model name."""

    def __init__(self, name):
        self.name = name

    def __str__(self):
        return 'the %s synthesizer' % self.name

    def play(self):
        return 'is playing an electronic song'
class Human:
    """A person who introduces themselves by speaking."""

    def __init__(self, name):
        self.name = name

    def __str__(self):
        return '%s the human' % self.name

    def speak(self):
        return 'says hello'
class Computer:
    """A computer identified by its brand name."""

    def __init__(self, name):
        self.name = name

    def __str__(self):
        return 'the %s computer' % self.name

    def execute(self):
        return 'executes a program'
class Adapter:
    """Wrap *obj* and expose the callables in *adapted_methods* as attributes,
    giving heterogeneous objects a uniform interface."""

    def __init__(self, obj, adapted_methods):
        self.obj = obj
        for attr_name, method in adapted_methods.items():
            setattr(self, attr_name, method)

    def __str__(self):
        # Delegate the string form to the wrapped object.
        return str(self.obj)
def main():
    """Demonstrate the adapter pattern: give every object a uniform execute()."""
    synthesizer = Synthesizer('moog')
    person = Human('Bob')
    targets = [
        Computer('Asus'),
        Adapter(person, dict(execute=person.speak)),
        Adapter(synthesizer, dict(execute=synthesizer.play)),
    ]
    for target in targets:
        print('{} {}'.format(str(target), target.execute()))
if __name__ == "__main__":
main()
| [
"wax8280@163.com"
] | wax8280@163.com |
7d4aff7df2367bb22dc9f41b31a08713bd0699f1 | d190750d6cb34e9d86ae96724cf4b56a2f57a74a | /tests/r/test_penicillin.py | 0cb436b425b437de661d400636d1bc4084227041 | [
"Apache-2.0"
] | permissive | ROAD2018/observations | a119f61a48213d791de0620804adb8d21c2ad9fb | 2c8b1ac31025938cb17762e540f2f592e302d5de | refs/heads/master | 2021-09-24T04:28:02.725245 | 2018-09-16T23:06:30 | 2018-09-16T23:06:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.penicillin import penicillin
def test_penicillin():
  """Test module penicillin.py by downloading
   penicillin.csv and testing shape of
   extracted data has 144 rows and 3 columns
  """
  test_path = tempfile.mkdtemp()
  try:
    x_train, metadata = penicillin(test_path)
    # Expected shape: 144 observations x 3 variables.
    assert x_train.shape == (144, 3)
  finally:
    # Always clean the scratch directory.  The original only removed it on
    # failure (leaking it on success) and then called `raise()`, which in
    # Python 3 raises a bare tuple -> TypeError masking the real assertion.
    shutil.rmtree(test_path)
| [
"dustinviettran@gmail.com"
] | dustinviettran@gmail.com |
70e519043cf0a431f68a0786fbad374223501e77 | 3da69696601b2b3ad7bc1285a5f0343c7eafea80 | /lc888.py | 9b44f944a4450fb681eae24b3aa24a73adda0b00 | [] | no_license | GeorgyZhou/Leetcode-Problem | ee586463a2e4e75c910c095bdc057f1be70b5c1b | d6fac85a94a7188e93d4e202e67b6485562d12bd | refs/heads/master | 2021-06-30T15:58:04.698200 | 2020-12-18T22:55:49 | 2020-12-18T22:55:49 | 66,054,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | class Solution(object):
def fairCandySwap(self, A, B):
"""
:type A: List[int]
:type B: List[int]
:rtype: List[int]
"""
sum_a = sum(A)
sum_b = sum(B)
target = (sum_a + sum_b) / 2
diff = target - sum_b
set_a = set(A)
for b in B:
if b + diff in set_a:
return [b + diff, b]
| [
"michaelchouqj@gmail.com"
] | michaelchouqj@gmail.com |
f65794dc3bfb3a87865b42bd60309bdff9092190 | 28a462a28f443c285ca5efec181ebe36b147c167 | /tests/compile/basic/es2020/IterationStatement[0,0].LabelledEvaluation.spec | 24b97f4ffaca37ee1ec9e0e2e4054f282340c901 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | kaist-plrg/jstar | 63e71f9156860dc21cccc33a9f6c638dfee448ea | 1282919127ea18a7e40c7a55e63a1ddaaf7d9db4 | refs/heads/main | 2022-07-22T08:12:34.947712 | 2022-02-27T04:19:33 | 2022-02-27T11:06:14 | 384,045,526 | 6 | 4 | NOASSERTION | 2022-02-27T11:05:26 | 2021-07-08T07:53:21 | Python | UTF-8 | Python | false | false | 556 | spec | 1. Let _V_ be *undefined*.
1. Repeat,
1. Let _stmtResult_ be the result of evaluating |Statement|.
1. If LoopContinues(_stmtResult_, _labelSet_) is *false*, return Completion(UpdateEmpty(_stmtResult_, _V_)).
1. If _stmtResult_.[[Value]] is not ~empty~, set _V_ to _stmtResult_.[[Value]].
1. Let _exprRef_ be the result of evaluating |Expression|.
1. Let _exprValue_ be ? GetValue(_exprRef_).
1. If ! ToBoolean(_exprValue_) is *false*, return NormalCompletion(_V_). | [
"h2oche22@gmail.com"
] | h2oche22@gmail.com |
c41f02c8804faa4da500fd7196ad6460a39b89d9 | 8d49683cd799ed66bc9dd197c197d1e1c7a73120 | /src/gamesbyexample/tutorialguess2.py | dc65f038f09cfaa639c5bba236608793939f3f78 | [
"MIT"
] | permissive | trujilloshane/PythonStdioGames | d2e53ec22121b30e4b5317e46ed685831492d9c3 | 83ac9cd367f688539b77f67f0d44433fc6fdcbdf | refs/heads/master | 2020-12-13T15:12:26.917070 | 2020-01-06T09:38:39 | 2020-01-06T09:38:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 579 | py | # Tutorial: Guess the Number, by Al Sweigart al@inventwithpython.com
# Part 2 of a tutorial to make a "Guess the Number" game, bit by bit.
# Try copying the code in this program on your own and running the
# program before moving on to part 3. (You don't have to copy the
# comments.)
import random
secretNumber = random.randint(1, 20)
print('Hello! What is your name?')
playerName = input()
print('It is good to meet you, ' + playerName)
print('I am thinking of a number from 1 to 20.')
print('Take a guess.')
guess = input()
print('My secret number was', secretNumber)
| [
"asweigart@gmail.com"
] | asweigart@gmail.com |
919de8bda5555e026279ff964d9c8272e55f685d | 8da91c26d423bacbeee1163ac7e969904c7e4338 | /pyvisdk/do/performance_statistics_description.py | 0f598e45cec3f2d9bde0398e97bdd6052bb9f0e3 | [] | no_license | pexip/os-python-infi-pyvisdk | 5d8f3a3858cdd61fb76485574e74ae525cdc7e25 | 1aadea0afbc306d09f6ecb9af0e683dbbf961d20 | refs/heads/master | 2023-08-28T02:40:28.789786 | 2020-07-16T04:00:53 | 2020-07-16T04:00:53 | 10,032,240 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,056 | py |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def PerformanceStatisticsDescription(vim, *args, **kwargs):
    '''Data object to capture all information needed to describe a sample inventory.'''
    # Ask the SOAP factory for an empty PerformanceStatisticsDescription object.
    obj = vim.client.factory.create('{urn:vim25}PerformanceStatisticsDescription')

    # do some validation checking... (this type has no required arguments, so
    # the arity check below can never fire; kept from the code generator)
    if (len(args) + len(kwargs)) < 0:
        raise IndexError('Expected at least 1 arguments got: %d' % len(args))

    required = []
    optional = ['intervals', 'dynamicProperty', 'dynamicType']
    allowed = required + optional

    # Positional arguments fill the known attributes in declaration order.
    for name, value in zip(allowed, args):
        setattr(obj, name, value)

    # Keyword arguments must name a known attribute.
    for name, value in kwargs.items():
        if name not in allowed:
            raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(allowed)))
        setattr(obj, name, value)

    return obj
| [
"jmb@pexip.com"
] | jmb@pexip.com |
5774b8d69dcd3e00d299f03348bf1b4f69ab5b72 | 6a6d8c0c8ddd6f5a1c03788f35320dd4b82314ea | /yamtbx/command_line/resolution_shells.py | cf6f26cf672b843e46f52bd2e3242f51bb60570a | [
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause",
"MIT"
] | permissive | nsls-ii-mx/yamtbx | b817a131a8f6f515db99bc1743f81218997ac4ed | 311cf5a20e27a035a9e89c2abcb3c7d5e3684d67 | refs/heads/master | 2021-01-11T12:05:38.166937 | 2017-01-24T16:26:44 | 2017-01-24T16:26:44 | 76,574,177 | 1 | 0 | null | 2016-12-15T16:00:06 | 2016-12-15T16:00:06 | null | UTF-8 | Python | false | false | 1,555 | py | # LIBTBX_SET_DISPATCHER_NAME yamtbx.resolution_shells
def run(d_max, d_min, nbins, power, quiet=False):
    """Split the resolution range [d_max, d_min] (in Angstrom) into `nbins`
    shells of equal width in 1/d^power and return the nbins+1 boundaries.

    d_max, d_min -- low/high resolution limits (d_max > d_min)
    nbins        -- number of shells
    power        -- 2 for XDS-style 1/d^2 binning, 3 for scalepack-style
    quiet        -- suppress the human-readable printout

    This version is Python 2/3 compatible: the original used `print`
    statements and `xrange`, which are syntax/name errors under Python 3.
    """
    # Constant step in s = d**-power gives the equal-volume-style shells.
    step = (d_min ** (-power) - d_max ** (-power)) / float(nbins)
    start = 1. / (d_max ** power)
    d_vals = [(start + i * step) ** (-1. / power) for i in range(nbins + 1)]

    if not quiet:
        print("%d resolution shells (%.3f - %.3f A) split by 1/d^%d" % (nbins, d_max, d_min, power))
        print(" ".join(["%.3f" % x for x in d_vals]))
        print("")
        print("For XSCALE,")
        print(" RESOLUTION_SHELLS= %s" % (" ".join(["%.3f" % x for x in d_vals[1:]])))

    return d_vals
# run()
if __name__ == "__main__":
import sys
import optparse
parser = optparse.OptionParser(prog="yamtbx.resolution_shells",
description="Show resolution shells",
usage="usage: %prog [options] d_max d_min")
parser.add_option("-n","--nshells", action="store", dest="nbins", type=int, default=9,
help="Number of shells (default: 9)")
parser.add_option("-p","--power", action="store", dest="pow", type=int, default=2,
help="Split shells by 1/d^power. 2: xds style (default); 3: scalepack style")
opts, args = parser.parse_args(sys.argv[1:])
if len(args) != 2:
parser.print_help()
quit()
try:
d_max, d_min = map(float, args)
except:
parser.print_help()
quit()
if d_max < d_min: d_max, d_min = d_min, d_max
run(d_max, d_min, opts.nbins, opts.pow)
| [
"keitaroyam@users.noreply.github.com"
] | keitaroyam@users.noreply.github.com |
6dbd9a7835aecb05ef4225d9b2774b2348f87fd2 | 1b3c32f1de0b0fb88f181ae1e1f47f00fcea576f | /setup.py | 2e3c13130fccf12524868690d84853acdaa41aa7 | [
"MIT",
"Apache-2.0",
"LicenseRef-scancode-unicode"
] | permissive | source-foundry/font-unicode | 0df33bda7774c926b1258e7003962a62ab2a1b4f | 74cc56f5674f41ee09f47f2c8f3dda0349a1ff73 | refs/heads/master | 2021-01-10T03:40:43.528760 | 2016-02-15T03:46:19 | 2016-02-15T03:46:19 | 43,691,845 | 8 | 2 | null | 2017-03-05T21:18:33 | 2015-10-05T15:08:31 | Python | UTF-8 | Python | false | false | 2,314 | py | import os
import re
from setuptools import setup, find_packages
def docs_read(fname):
    """Return the text of docs/<fname> next to this setup script."""
    # `with` guarantees the handle is closed; the original leaked it.
    with open(os.path.join(os.path.dirname(__file__), 'docs', fname)) as doc_file:
        return doc_file.read()
def version_read():
    """Read major/minor/patch from lib/fontunicode/settings.py and return 'X.Y.Z'."""
    settings_path = os.path.join(os.path.dirname(__file__), 'lib', 'fontunicode', 'settings.py')
    # `with` closes the handle promptly; the original leaked it.
    with open(settings_path) as settings:
        settings_file = settings.read()

    def _component(name):
        # Matches e.g.: major_version = "1"  (single or double quotes).
        match = re.search("""%s\s*?=\s*?["']{1}(\d+)["']{1}""" % name, settings_file)
        # Fall back to "0" when the assignment is missing or empty.  The
        # original assigned the *int* 0 in that case, which would have
        # crashed the string concatenation below (and crashed earlier still
        # on a missing match, since None has no .group()).
        if match is None or len(match.group(1)) == 0:
            return "0"
        return match.group(1)

    return (_component("major_version") + "." +
            _component("minor_version") + "." +
            _component("patch_version"))
setup(
name='font-unicode',
version=version_read(),
description='Command line Unicode character code point and character name search',
long_description=(docs_read('README.rst')),
url='https://github.com/source-foundry/font-unicode',
license='MIT license',
author='Christopher Simpkins',
author_email='chris@sourcefoundry.org',
platforms=['any'],
entry_points = {
'console_scripts': [
'font-unicode = fontunicode.app:main'
],
},
packages=find_packages("lib"),
package_dir={'': 'lib'},
install_requires=['commandlines'],
keywords='unicode, font, fonts, typeface, typefaces, type, type design, type development, character, code point, name, search',
include_package_data=True,
classifiers=[
'Intended Audience :: End Users/Desktop',
'Development Status :: 4 - Beta',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: Microsoft :: Windows'
],
) | [
"git.simpkins@gmail.com"
] | git.simpkins@gmail.com |
1269ca1daa7c5c39a3505ef9cf0ed5ba02d2c6ff | 09409b6910f7d58a28e46b90b111d5ff3d5442cc | /VSRTorch/Framework/Environment.py | ee43920616307bf06becbc22dbd67ab4f8913c6b | [
"MIT"
] | permissive | moyulization/VideoSuperResolution | 5600ae1cc0638226c3f5683b84e6731ba5e56f10 | dc8bf94aa65c1a4e92e6024ca77b402f5b252fcf | refs/heads/master | 2020-05-23T04:49:06.309103 | 2019-05-07T11:31:24 | 2019-05-07T11:31:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,557 | py | # Copyright (c): Wenyi Tang 2017-2019.
# Author: Wenyi Tang
# Email: wenyi.tang@intel.com
# Update Date: 2019/5/7 下午5:21
import logging
from pathlib import Path
import numpy as np
import torch
def _make_ckpt_name(name, step):
return '{}_ep{:04d}.pth'.format(name, step)
def _parse_ckpt_name(name):
if not name:
return 0
model_name, epochs = Path(name).stem.split('.')[0].split('_')
return int(epochs[2:])
class Env:
  """Pytorch model runtime Env-ironment.

  Args:
    model: a Model object (note it's NOT nn.Module), representing a container
      of multiple nn.Module objects. See `VSRTorch.Models.Model` for details.
    work_dir: a folder path, working directory of this environment.
    log_level: logging verbosity level.
    pre_train_model: (optional) a path to .pth file to restore the model.

  Usage:
    Use `with` syntax to enter the Env:

    >>> with Env(...) as e: ...
  """
  def __init__(self, model, work_dir, log_level='DEBUG', pre_train_model=None):
    self._m = model
    # Checkpoints go to <work_dir>/save, log files to <work_dir>/log.
    self._saved = Path(work_dir) / 'save'
    self._logd = Path(work_dir) / 'log'
    # Guards against restoring twice; see _restore().
    self._restored = False
    self._logger = logging.getLogger("VSR")
    self._logger.setLevel(log_level)
    # Path('') when no pre-trained model is given; validated in _startup().
    self._pth = Path(pre_train_model or '')
  def _startup(self):
    # Create working directories and validate the pre-trained model path.
    self._saved.mkdir(parents=True, exist_ok=True)
    self._logd.mkdir(parents=True, exist_ok=True)
    if not self._pth.exists() or not self._pth.is_file():
      self._pth = None
    if self._logger.isEnabledFor(logging.DEBUG):
      # Mirror debug logging into <work_dir>/log/training.txt.
      hdl = logging.FileHandler(self._logd / 'training.txt')
      self._logger.addHandler(hdl)
  def _close(self):
    # Placeholder teardown hook; nothing to release yet.
    pass
  def __enter__(self):
    """Prepare working directories, show the model summary, and return self."""
    self._startup()
    self.model.display()
    return self
  def __exit__(self, exc_type, exc_val, exc_tb):
    """Tear down the environment (currently a no-op)."""
    self._close()
  @property
  def model(self):
    # The wrapped Model container passed at construction time.
    return self._m
  def _find_last_ckpt(self, pattern):
    # restore the latest checkpoint in save dir
    # sort as modification time
    ckpt = sorted(self._saved.glob(pattern), key=lambda x: x.stat().st_mtime_ns)
    return ckpt[-1].resolve() if ckpt else None
  def _restore_model(self, epoch=None, pth=None, map_location=None):
    # Restore every sub-module (and, when loading from the save dir, every
    # optimizer).  Returns the epoch number parsed from the checkpoint name,
    # or 0 when it cannot be determined.
    last_epoch = 0
    for key, model in self.model.modules.items():
      if pth is None:
        # No explicit file: look up the newest matching checkpoint on disk.
        if epoch is None:
          ckpt = f'*{key}*.pth'
        else:
          ckpt = _make_ckpt_name(key, epoch)
        fp = self._find_last_ckpt(ckpt)
      else:
        fp = pth
      if fp:
        self._logger.info(f"Restoring params for {key} from {fp}.")
        try:
          last_epoch = max(_parse_ckpt_name(str(fp)), last_epoch)
        except ValueError:
          # File name does not follow the <name>_ep<NNNN> convention.
          last_epoch = 0
        try:
          model.load_state_dict(torch.load(str(fp), map_location=map_location))
        except RuntimeError:
          # Shape/key mismatch between checkpoint and model; keep going.
          self._logger.warning(f"Couldn't restore state for {key} from {fp}.")
    if pth is None:
      # Optimizer states are only restored when loading from the save dir.
      for key, opt in self.model.opts.items():
        fp = self._saved / f'{key}.pth'
        try:
          opt.load_state_dict(torch.load(str(fp)))
        except (ValueError, FileNotFoundError):
          self._logger.warning(f"trying to restore state for optimizer {key}, "
                               "but failed.")
    return last_epoch
  def _save_model(self, step):
    # Save every sub-module as <name>_ep<step>.pth and every optimizer as
    # <name>.pth inside the save dir.
    for key, model in self.model.modules.items():
      fp = self._saved / _make_ckpt_name(key, step)
      torch.save(model.state_dict(), str(fp))
    for key, opt in self.model.opts.items():
      fp = self._saved / f'{key}.pth'
      torch.save(opt.state_dict(), str(fp))
  def _restore(self, epoch=None, map_location=None):
    # restore graph
    # Idempotent: the first call restores and caches self.last_epoch,
    # subsequent calls return the cached value.
    if self._restored:
      return self.last_epoch
    self.last_epoch = self._restore_model(epoch, self._pth, map_location)
    self._restored = True
    return self.last_epoch
  def set_seed(self, seed):
    """set a seed for RNG
    Note: RNG in torch and numpy is different.
    """
    np.random.seed(seed)
    torch.manual_seed(seed)
  def export(self, export_dir='.', version=1):
    """export ONNX model.
    Args:
      export_dir: path to save onnx files.
      version: (optional) a child-folder to control output versions.
    """
    export_path = Path(export_dir) / str(version)
    while export_path.exists():
      version += 1  # step ahead 1 version
      export_path = Path(export_dir) / str(version)
    export_path.mkdir(exist_ok=False, parents=True)
    self.model.export(export_path)
    self._logger.info(f"Export ONNX to {str(export_path)}")
| [
"twytwy12345@live.com"
] | twytwy12345@live.com |
4b7b32f5c4bdb6bfa3a50322f262394885ae6996 | 139af68b78734a6bc53bd942ffa05476baf3d71d | /Python Basic 2020/scholarship.py | 46bf7b78b9d1c70997de4b88288fd892fcd3cf19 | [] | no_license | MiroVatov/Python-SoftUni | 7fe3fc0a3928848c5317fb120f789c773bfc117e | 0d0d6f116281b4de8c413d254386e27d992d047b | refs/heads/main | 2023-08-24T09:44:31.261137 | 2021-10-18T14:04:03 | 2021-10-18T14:04:03 | 317,510,574 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 961 | py | import math
# Scholarship decision: read the household income, the average grade and the
# minimum salary, then award the better of the two scholarships (or none).
income = float(input())
average_grades = float(input())
min_salary = float(input())

# Social scholarship: 35% of the minimum salary, rounded down.
social_scholarship = math.floor(0.35 * min_salary)
# Excellence scholarship: 25 BGN per grade point, rounded down.
scholarship = math.floor(average_grades * 25)

# Eligibility: social needs low income AND a grade of at least 4.50;
# excellence needs a grade of at least 5.50.  (The original compared
# `4.5 >= average_grades > 5.50`, which can never be true, and printed
# nothing when both amounts were equal.)
social_ok = income < min_salary and average_grades >= 4.50
excellent_ok = average_grades >= 5.50

if excellent_ok and (not social_ok or scholarship >= social_scholarship):
    # Prefer the excellence scholarship on ties.
    print(f'You get a scholarship for excellent results {scholarship} BGN')
elif social_ok:
    print(f'You get a Social scholarship {social_scholarship} BGN')
else:
    print('You cannot get a scholarship!')
| [
"noreply@github.com"
] | MiroVatov.noreply@github.com |
923b08f989cc5e5cf10261fb953a99ce009c5723 | 738b6d6ec4572f5848940b6adc58907a03bda6fb | /tests/pymcell4_positive/1910_get_molecule_ids_w_pattern/model.py | 4028ae43db75f8bc3d72eae558a1763e960ec613 | [
"Unlicense",
"LicenseRef-scancode-public-domain",
"MIT"
] | permissive | mcellteam/mcell_tests | 09cd1010a356e0e07c88d7e044a73c5606c6e51a | 34d2d967b75d56edbae999bf0090641850f4f4fe | refs/heads/master | 2021-12-24T02:36:24.987085 | 2021-09-24T14:19:41 | 2021-09-24T14:19:41 | 174,733,926 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,800 | py | #!/usr/bin/env python3
# WARNING: This is an automatically generated file and will be overwritten
# by CellBlender on the next model export.
import sys
import os
MODEL_PATH = os.path.dirname(os.path.abspath(__file__))
# ---- import mcell module located in directory ----
# ---- specified by system variable MCELL_PATH ----
MCELL_PATH = os.environ.get('MCELL_PATH', '')
if MCELL_PATH:
lib_path = os.path.join(MCELL_PATH, 'lib')
if os.path.exists(os.path.join(lib_path, 'mcell.so')) or \
os.path.exists(os.path.join(lib_path, 'mcell.pyd')):
sys.path.append(lib_path)
else:
print("Error: Python module mcell.so or mcell.pyd was not found in "
"directory '" + lib_path + "' constructed from system variable "
"MCELL_PATH.")
sys.exit(1)
else:
print("Error: system variable MCELL_PATH that is used to find the mcell "
"library was not set.")
sys.exit(1)
import mcell as m
import mcell as m
# ---- model parameters ----
# load parameters from BNGL
params = m.bngl_utils.load_bngl_parameters('model.bngl')
# ---- simulation setup ----
ITERATIONS = 1
TIME_STEP = 1e-06
DUMP = False
EXPORT_DATA_MODEL = True
SEED = 1
# create main model object
model = m.Model()
model.load_bngl('model.bngl')
# ---- configuration ----
model.config.time_step = TIME_STEP
model.config.seed = SEED
model.config.total_iterations = ITERATIONS
model.notifications.rxn_and_species_report = False
model.config.partition_dimension = 2
model.config.subpartition_dimension = 0.2
model.initialize()
model.run_iterations(1)
num_AR = params['num_AR']
num_AS = params['num_AS']
num_BT = params['num_BT']
num_BU = params['num_BU']
num_ASBT = params['num_ASBT']
num_ARBU = params['num_ARBU']
ids_A = model.get_molecule_ids(pattern = m.Complex('A'))
ids_B = model.get_molecule_ids(pattern = m.Complex('B'))
ids_AR = model.get_molecule_ids(pattern = m.Complex('A(a~R)'))
ids_AS = model.get_molecule_ids(pattern = m.Complex('A(a~S)'))
ids_BT = model.get_molecule_ids(pattern = m.Complex('B(b~T)'))
ids_BU = model.get_molecule_ids(pattern = m.Complex('B(b~U)'))
ids_ASBT = model.get_molecule_ids(pattern = m.Complex('A(a~S,b!1).B(b~T,a!1)'))
ids_ARBU = model.get_molecule_ids(pattern = m.Complex('A(a~R,b!1).B(b~U,a!1)'))
ids_AB = model.get_molecule_ids(pattern = m.Complex('A(b!1).B(a!1)'))
assert len(ids_A) == num_AR + num_AS + num_ASBT + num_ARBU
assert len(ids_B) == num_BT + num_BU + num_ASBT + num_ARBU
assert len(ids_AR) == num_AR + num_ARBU
assert len(ids_AS) == num_AS + num_ASBT
assert len(ids_BT) == num_BT + num_ASBT
assert len(ids_BU) == num_BU + num_ARBU
assert len(ids_ASBT) == num_ASBT
assert len(ids_ARBU) == num_ARBU
assert len(ids_AB) == num_ASBT + num_ARBU
model.end_simulation()
| [
"ahusar@salk.edu"
] | ahusar@salk.edu |
ae8e9c862609271c8b66204f40532e2e21281027 | b7b2f80ab5e1ee0ea028576e3014b62b8d3a8d7e | /pyfile/pyfm-011/pyfmlight.py | 7ddadbd3b74d728a4c752e8270718342b39d2480 | [] | no_license | pglen/pgpygtk | 4d1405478a714f003984cf3e3db04ff1f767470b | 33f58010e304f1a312f2356de453ecedb7aa21ef | refs/heads/master | 2021-01-22T01:18:52.238415 | 2019-01-01T01:37:24 | 2019-01-01T01:37:24 | 102,215,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,603 | py | #!/usr/bin/env python
# 3D File Manager in Python OpenGL, light helper routines
#
#import math, sys, rand
#import gtk.gtkgl
from OpenGL.GL import *
from OpenGL.GLU import *
def light(self):
    """Set up a single OpenGL light (GL_LIGHT0) plus depth testing.

    Configures mid-grey ambient/diffuse/specular terms and a directional
    light position, enables lighting and depth testing, and sets the
    clear color/depth.  `self` is unused; the parameter is kept for the
    module's helper-function calling convention.

    The original kept many commented-out alternative values and two unused
    locals (light_model_ambient, light_local_view); they have been removed.
    """
    light_ambient = [0.5, 0.5, 0.5, 1.0]
    light_diffuse = [0.5, 0.5, 0.5, 1.0]
    light_specular = [.5, .5, .5, 1.0]
    # w == 0.0 makes this a directional light along the Z axis.
    light_position = [0.0, 0.0, 1.0, 0.0]

    # Initialise the lighting properties.
    glLightfv (GL_LIGHT0, GL_AMBIENT, light_ambient)
    glLightfv (GL_LIGHT0, GL_DIFFUSE, light_diffuse)
    glLightfv (GL_LIGHT0, GL_SPECULAR, light_specular)
    glLightfv (GL_LIGHT0, GL_POSITION, light_position)

    glEnable (GL_LIGHTING)
    glEnable (GL_LIGHT0)
    glEnable (GL_DEPTH_TEST)

    # Black background, far clear depth.
    glClearColor(.0, .0, .0, 1.0)
    glClearDepth(1.0)
"peterglen99@gmail.com"
] | peterglen99@gmail.com |
a35fdea02255d765351560a273e9c3223a934f95 | 4293c8d2e0e8eb7d21e2706ecfdbbe6d80244f5d | /pfurl/tests/test_message.py | 2758baaaaefb60743929b3a867b21ba13d954cc3 | [
"MIT"
] | permissive | FNNDSC/pfurl | 52352a4c9085ee620e509bd5e0b20c82913e52ad | c37e57b5dc03a81a15e566f2d325a7dd1047ac10 | refs/heads/master | 2021-11-22T22:25:54.776531 | 2021-10-28T17:01:27 | 2021-10-28T17:01:27 | 87,982,205 | 1 | 10 | MIT | 2021-01-18T07:40:41 | 2017-04-11T21:45:59 | Python | UTF-8 | Python | false | false | 2,860 | py | from unittest import TestCase
from pfurl import Message
from pfurl import Colors
class TestMessage(TestCase):
    """Smoke test for the pfurl Message logger: exercises console, file,
    syslog, tee and tag modes end to end and passes if nothing raises."""
    def test_message_constructor(self):
        """Drive two Message instances through their output targets,
        verbosity/debug filtering and formatting options."""
        message1 = Message()
        message2 = Message()
        message1.syslog(True)
        message1(Colors.RED + Colors.WHITE_BCKGRND + 'hello world!\n' + Colors.NO_COLOUR)
        # Send message via datagram to 'pangea' on port '1701'.
        # message1.to('pangea:1701')
        # message1('hello, pangea!\n');
        # message1('this has been sent over a datagram socket...\n')
        # Now for some column width specs and 'debug' type messages
        # These will all display on the console since debug=5 and the
        # message1.verbosity(10) means that all debug tagged messages with
        # level less-than-or-equal-to 10 will be passed.
        message1.to('stdout')
        message1.verbosity(10)
        message1('starting process 1...', lw=90, debug=5)
        message1('[ ok ]\n', rw=20, syslog=False, debug=5)
        message1('parsing process 1 outputs...', lw=90, debug=5)
        message1('[ ok ]\n', rw=20, syslog=False, debug=5)
        message1('preparing final report...', lw=90, debug=5)
        message1('[ ok ]\n', rw=20, syslog=False, debug=5)
        message2.to('/tmp/message2.log')
        message2.tee(True)
        # A verbosity level of message2.verbosity(1) and a
        # message2.to(sys.stdout) will not output any of the
        # following since the debug level for each message
        # is set to '5'. The verbosity should be at least
        # message2.verbosity(5) for output to appear on the
        # console.
        #
        # If message2.tee(True) and message2.to('/tmp/message2.log')
        # then all messages will be displayed regardless
        # of the internal verbosity level.
        message2.verbosity(1)
        message2('starting process 1...', lw=90, debug=5)
        message2('[ ok ]\n', rw=20, syslog=False, debug=5)
        message2('parsing process 1 outputs...', lw=90, debug=5)
        message2('[ ok ]\n', rw=20, syslog=False, debug=5)
        message2('preparing final report...', lw=90, debug=5)
        message2('[ ok ]\n', rw=20, syslog=False, debug=5)
        # Retarget outputs to files; .to() accepts both paths and file objects.
        message1.to('/tmp/test.log')
        message1('and now to /tmp/test.log\n')
        message2.to(open('/tmp/test2.log', 'a'))
        message2('another message to /tmp/test2.log\n')
        message2.tagstring('MARK-->')
        message2('this text is tagged\n')
        message2('and so is this text\n')
        # Build one message incrementally, then emit it with a bare call.
        message1.clear()
        message1.append('this is message ')
        message1.append('that is constructed over several ')
        message1.append('function calls...\n')
        message1.to('stdout')
        message1()
        message2.tag(False)
        message2('goodbye!\n')
        # didn't crash
        self.assertTrue(True)
| [
"rudolph.pienaar@gmail.com"
] | rudolph.pienaar@gmail.com |
5b607d6b9cee4ca29ffb02c954d4974d9d659227 | eb297ff1e0011438fd184cc338b3fb86859b81c9 | /Chapter 2/2-09.py | f6650c9ed58648f9a33f971fed80805763e78249 | [] | no_license | mynameisbenzo/PythonCrashCourse | c73a4505d9cdfe4df78e3ed01adb3491debf8a9b | 831a9962a3c6cab53ecfdb1d2cceb0dd2d9c5a0a | refs/heads/master | 2021-04-12T08:13:51.772957 | 2018-05-02T05:54:57 | 2018-05-02T05:54:57 | 126,091,903 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | # adding whitespace is fine, but what about taking whitespace out?
favLanguage = 'python '
# whether you see it or not, that space at the end of python is currently being printed
print(favLanguage)
# let's strip that space with rstrip()
favLanguage = favLanguage.rstrip()
# now instead of printing 'python ', we are currently printing 'python'
print(favLanguage)
# using lstrip()
favLanguage = ' python'
favLanguage = favLanguage.lstrip()
# will print 'python' not ' python'
print(favLanguage)
# using strip()
favLanguage = ' python'
favLanguage = favLanguage.strip()
# will print 'python' not ' python '
print(favLanguage) | [
"lorhernandez@csumb.edu"
] | lorhernandez@csumb.edu |
358135f3c10986d829cb49c921930c45ce321063 | ad13583673551857615498b9605d9dcab63bb2c3 | /output/instances/nistData/atomic/negativeInteger/Schema+Instance/NISTXML-SV-IV-atomic-negativeInteger-minExclusive-1-1.py | b2bf10a8b4a038f05f850d22b1b89862949d679d | [
"MIT"
] | permissive | tefra/xsdata-w3c-tests | 397180205a735b06170aa188f1f39451d2089815 | 081d0908382a0e0b29c8ee9caca6f1c0e36dd6db | refs/heads/main | 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 | MIT | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | Python | UTF-8 | Python | false | false | 333 | py | from output.models.nist_data.atomic.negative_integer.schema_instance.nistschema_sv_iv_atomic_negative_integer_min_exclusive_1_xsd.nistschema_sv_iv_atomic_negative_integer_min_exclusive_1 import NistschemaSvIvAtomicNegativeIntegerMinExclusive1
# Instance fixture for the generated NIST negativeInteger minExclusive test
# type; the value is a large negative integer accepted by the schema's
# minExclusive facet.
obj = NistschemaSvIvAtomicNegativeIntegerMinExclusive1(
    value=-999999999999999998
)
| [
"tsoulloftas@gmail.com"
] | tsoulloftas@gmail.com |
7d7bef5c7b17b9b279306a74373faadcaf8172a8 | e20a786fb69b361e1ddfa509df63713371fa1eae | /examples/random_bot_example.py | eb37b882de7d027d8f75eefe133e8e5fa9a3bbe4 | [
"Apache-2.0"
] | permissive | greentec/ffai | d7fec6192c75f996c77f714cef7d76e06d44b0af | 3a966a395e5d48c94377cf8dd367897f205d3f9b | refs/heads/master | 2020-06-19T10:34:14.722778 | 2019-06-24T14:15:20 | 2019-06-24T14:15:20 | 196,679,040 | 1 | 0 | Apache-2.0 | 2019-07-13T04:42:58 | 2019-07-13T04:42:57 | null | UTF-8 | Python | false | false | 1,980 | py | #!/usr/bin/env python3
from ffai.core.game import *
from ffai.core.model import *
from ffai.ai.registry import register_bot, make_bot
import numpy as np
class MyRandomBot(Agent):
    """Baseline agent that plays uniformly random legal actions."""

    def __init__(self, name, seed=None):
        super().__init__(name)
        self.my_team = None
        # Private RNG so games are reproducible for a given seed.
        self.rnd = np.random.RandomState(seed)

    def new_game(self, game, team):
        """Remember which team this bot controls for the new game."""
        self.my_team = team

    def act(self, game):
        """Pick a random available action, re-drawing past PLACE_PLAYER choices."""
        choice = self.rnd.choice(game.state.available_actions)
        while choice.action_type == ActionType.PLACE_PLAYER:
            choice = self.rnd.choice(game.state.available_actions)
        # A random position and/or player, when the action offers any.
        position = self.rnd.choice(choice.positions) if len(choice.positions) > 0 else None
        player = self.rnd.choice(choice.players) if len(choice.players) > 0 else None
        # Hand the assembled action back to the framework.
        return Action(choice.action_type, pos=position, player=player)

    def end_game(self, game):
        """Nothing to clean up when a game ends."""
        pass
# Register the bot to the framework
register_bot('my-random-bot', MyRandomBot)
if __name__ == "__main__":
    # Load configurations, rules, arena and teams
    config = get_config("ff-11-bot-bowl-i.json")
    ruleset = get_rule_set(config.ruleset)
    arena = get_arena(config.arena)
    home = get_team_by_id("human-1", ruleset)
    away = get_team_by_id("human-2", ruleset)
    config.competition_mode = False
    config.debug_mode = False
    # Play 10 games
    game_times = []  # NOTE(review): never populated below — looks vestigial
    for i in range(10):
        # Fresh bot instances for both sides each game.
        away_agent = make_bot("my-random-bot")
        home_agent = make_bot("my-random-bot")
        game = Game(i, home, away, home_agent, away_agent, config, arena=arena, ruleset=ruleset)
        game.config.fast_mode = True
        print("Starting game", (i+1))
        # init() runs the game to completion in fast mode.
        game.init()
        print("Game is over")
| [
"njustesen@gmail.com"
] | njustesen@gmail.com |
2c078cc28bf5d3b25dce2dc028c83c1da2334d13 | 7006366dffa1576d54d5b8e619de10d999f9ccd7 | /application.py | 12d8f3fe68f0a4783d6ab98688fc638405061705 | [] | no_license | gaolinjie/webeta | 47e01d98cf20cd892b5005048d9729480e3ead2c | 3038e36abda5118be2b1075ca93f57b79da370b9 | refs/heads/master | 2021-01-10T11:14:34.450441 | 2016-04-09T17:07:04 | 2016-04-09T17:07:04 | 53,676,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,246 | py | #!/usr/bin/env python
# coding=utf-8
#
# Copyright 2016 webeta
# cat /etc/mime.types
# application/octet-stream crx
import sys
# Python 2 idiom: re-expose sys.setdefaultencoding (removed by site.py at
# startup) and force UTF-8 as the process-wide default string encoding.
reload(sys)
sys.setdefaultencoding("utf8")
import os.path
import re
import memcache
import torndb
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import handler.index
from tornado.options import define, options
from lib.loader import Loader
from lib.session import Session, SessionManager
from jinja2 import Environment, FileSystemLoader
# Command-line options (tornado.options): HTTP port and MySQL connection
# parameters; override with --port=..., --mysql_host=..., etc.
define("port", default = 80, help = "run on the given port", type = int)
define("mysql_host", default = "localhost", help = "community database host")
define("mysql_database", default = "webeta", help = "community database name")
define("mysql_user", default = "webeta", help = "community database user")
define("mysql_password", default = "webeta", help = "community database password")
class Application(tornado.web.Application):
    """Tornado application: URL routing plus process-wide shared resources
    (DB connection, model loader, session manager, memcache client)."""
    def __init__(self):
        # Application-wide settings; also passed to templates/handlers.
        settings = dict(
            blog_title = u"webeta",
            template_path = os.path.join(os.path.dirname(__file__), "templates"),
            static_path = os.path.join(os.path.dirname(__file__), "static"),
            root_path = os.path.join(os.path.dirname(__file__), "/"),
            xsrf_cookies = False,
            cookie_secret = "cookie_secret_code",
            login_url = "/login",
            autoescape = None,
            # Jinja2 environment used instead of tornado's template engine.
            jinja2 = Environment(loader = FileSystemLoader(os.path.join(os.path.dirname(__file__), "templates")), trim_blocks = True),
            # User/topic names that cannot be registered as routes.
            reserved = ["user", "topic", "home", "setting", "forgot", "login", "logout", "register", "admin"],
            debug=True,
        )
        # URL routing table: static files first, then page/API handlers.
        handlers = [
            (r"/(favicon\.ico)", tornado.web.StaticFileHandler, dict(path = settings["static_path"])),
            (r"/(sitemap.*$)", tornado.web.StaticFileHandler, dict(path = settings["static_path"])),
            (r"/(bdsitemap\.txt)", tornado.web.StaticFileHandler, dict(path = settings["static_path"])),
            (r"/(orca\.txt)", tornado.web.StaticFileHandler, dict(path = settings["static_path"])),
            (r"/", handler.index.IndexHandler),
            (r"/weixin", handler.index.WeixinHandler),
            (r"/shareit", handler.index.ShareItHandler),
            (r"/t/(.*)", handler.index.TopicHandler),
            (r"/addad", handler.index.AddAdHandler),
            (r"/myshares", handler.index.MySharesHandler),
            (r"/myads", handler.index.MyAdsHandler),
            (r"/tb/(.*)", handler.index.TaobaoHandler),
            (r"/prompt/(.*)", handler.index.TaobaoPromptHandler),
            (r"/addtb", handler.index.AddTbHandler),
            (r"/get/shop", handler.index.GetShopUUIDHandler),
            (r"/shop/(.*)", handler.index.ShopHandler),
            (r"/api/shop/(.*)", handler.index.GetShopItemsHandler),
            (r"/mytbs", handler.index.MyTabaosHandler),
            (r"/edit/tb/(.*)", handler.index.TaobaoEditHandler),
        ]
        tornado.web.Application.__init__(self, handlers, **settings)
        # Have one global connection to the blog DB across all handlers
        self.db = torndb.Connection(
            host = options.mysql_host, database = options.mysql_database,
            user = options.mysql_user, password = options.mysql_password
        )
        # Have one global loader for loading models and handles
        self.loader = Loader(self.db)
        # Have one global model for db query
        self.user_model = self.loader.use("user.model")
        self.topic_model = self.loader.use("topic.model")
        self.ad_model = self.loader.use("ad.model")
        self.taobao_model = self.loader.use("taobao.model")
        self.shop_model = self.loader.use("shop.model")
        # Have one global session controller
        self.session_manager = SessionManager(settings["cookie_secret"], ["127.0.0.1:11211"], 0)
        # Have one global memcache controller
        self.mc = memcache.Client(["127.0.0.1:11211"])
def main():
    """Parse command-line options, bind the HTTP server and serve forever."""
    tornado.options.parse_command_line()
    server = tornado.httpserver.HTTPServer(Application())
    server.listen(options.port)
    tornado.ioloop.IOLoop.instance().start()

if __name__ == "__main__":
    main()
| [
"gaolinjie@gmail.com"
] | gaolinjie@gmail.com |
ce2033c9bbcab7449f71973ced2d77fe349b5e39 | d476c93aa2aecd253508da0cc35071e456199318 | /test_autoarray/plot/wrap/base/test_colorbar.py | 44e53778fc90d48d4852ab5580bf6399337c6ff2 | [
"MIT"
] | permissive | Jammy2211/PyAutoArray | 82916f3f5530f938786f61f870df353b26732e37 | 6639dd86d21ea28e942155753ec556752735b4e4 | refs/heads/main | 2023-08-19T10:51:05.520942 | 2023-08-14T09:12:15 | 2023-08-14T09:12:15 | 210,980,464 | 6 | 5 | MIT | 2023-09-13T14:07:39 | 2019-09-26T02:18:10 | Python | UTF-8 | Python | false | false | 1,601 | py | import autoarray.plot as aplt
import matplotlib.pyplot as plt
import numpy as np
def test__loads_values_from_config_if_not_manually_input():
    """Colorbar falls back to config-file values unless given explicitly."""
    cb = aplt.Colorbar()
    assert cb.config_dict["fraction"] == 3.0
    assert cb.manual_tick_values is None
    assert cb.manual_tick_labels is None

    # Manually supplied tick values/labels are kept verbatim.
    cb = aplt.Colorbar(manual_tick_values=(1.0, 2.0), manual_tick_labels=(3.0, 4.0))
    assert cb.manual_tick_values == (1.0, 2.0)
    assert cb.manual_tick_labels == (3.0, 4.0)

    # Subplot mode switches to the subplot section of the config.
    cb = aplt.Colorbar()
    cb.is_for_subplot = True
    assert cb.config_dict["fraction"] == 0.1

    # Explicit keyword arguments win over either config section.
    cb = aplt.Colorbar(fraction=6.0)
    cb.is_for_subplot = True
    assert cb.config_dict["fraction"] == 6.0
def test__plot__works_for_reasonable_range_of_values():
    """Smoke test: attaching colorbars to a plotted image must not raise,
    across default, manual-tick and explicit-color-value configurations."""
    figure = aplt.Figure()
    # Case 1: plain colorbar with large fraction/pad.
    fig, ax = figure.open()
    plt.imshow(np.ones((2, 2)))
    cb = aplt.Colorbar(fraction=1.0, pad=2.0)
    cb.set(ax=ax, units=None)
    figure.close()
    # Case 2: manual tick values and labels.
    fig, ax = figure.open()
    plt.imshow(np.ones((2, 2)))
    cb = aplt.Colorbar(
        fraction=0.1,
        pad=0.5,
        manual_tick_values=[0.25, 0.5, 0.75],
        manual_tick_labels=[1.0, 2.0, 3.0],
    )
    cb.set(ax=ax, units=aplt.Units())
    figure.close()
    # Case 3: colorbar built from explicit color values and a cmap.
    fig, ax = figure.open()
    plt.imshow(np.ones((2, 2)))
    cb = aplt.Colorbar(fraction=0.1, pad=0.5)
    cb.set_with_color_values(
        cmap=aplt.Cmap().cmap, color_values=[1.0, 2.0, 3.0], ax=ax, units=None
    )
    figure.close()
| [
"james.w.nightingale@durham.ac.uk"
] | james.w.nightingale@durham.ac.uk |
c48fdcf193858cfb50927df777850acb30ebd52e | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/226/users/4254/codes/1637_1055.py | e44021d6d961a9fd0df484c0a0024cf18b907868 | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 429 | py | # Teste seu código aos poucos.
# Check whether a projectile launched with the given speed and angle lands
# at the given horizontal distance (within a 0.1 tolerance).
from math import *

v0 = float(input("Valor da vel. inicial: "))
theta = radians(float(input("Valor do angulo: ")))
alvo = float(input("Valor da distancia horizontal: "))

GRAVIDADE = 9.8
# Range of a projectile on flat ground: v0^2 * sin(2*theta) / g.
alcance = (v0 ** 2) * sin(2 * theta) / GRAVIDADE

if abs(alvo - alcance) <= 0.1:
    print("sim")
else:
    print("nao")
| [
"jvlo@icomp.ufam.edu.br"
] | jvlo@icomp.ufam.edu.br |
c1f078d04fb1349eb369dd17b1730b136b109c1b | bbf1153379eb6f8d0da97e7f608d8714f69bea2f | /masker.py | 3e8dbdfa38d1bb6e3504834ad4cca692e6ff1937 | [] | no_license | rubythonode/Fairy-zepeto-tech | 234fd96a26ba58f1267d723a7f9f8faeb6584fcc | b7f5e64fe9ae9ddeca91cb5c5a8629d9762f984e | refs/heads/master | 2020-12-23T11:56:46.095451 | 2019-05-11T09:08:12 | 2019-05-11T09:08:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,428 | py | import cv2
from imutils.video import VideoStream
import imutils
import dlib
def draw_dlib_rect(frame, rect):
    """Draw a dlib rectangle onto an OpenCV frame as a blue, 2px box."""
    top_left = (rect.left(), rect.top())
    bottom_right = (rect.right(), rect.bottom())
    cv2.rectangle(frame, top_left, bottom_right, (255, 0, 0), 2)
def main():
    """Detect the largest face from the webcam and overlay a mask image on it.

    Press 'q' in the preview window to quit.
    """
    detector = dlib.get_frontal_face_detector()
    # predictor = dlib.shape_predictor(
    #     './shape_predictor_68_face_landmarks.dat')
    vs = VideoStream(src=0, resolution=(1280, 960)).start()
    fileStream = False
    cv2.namedWindow('Frame', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('Frame', 1000, 800)
    # Remember the last detection so brief drop-outs do not make the overlay flicker.
    prev_face = None
    prev_idx = 0
    PREV_MAX = 100  # max consecutive frames a stale detection may be reused
    mask = cv2.imread('./mask.png')
    mask_h, mask_w, _ = mask.shape
    mask_x, mask_y = mask_w / 2, mask_h / 2  # mask center offsets
    while True:
        if fileStream and not vs.more():
            break
        frame = vs.read()
        frame = imutils.resize(frame, width=960)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        try:
            rects = detector(gray, 0)
            rects = sorted(
                rects,
                key=lambda rect: rect.width() * rect.height(),
                reverse=True)
            # Take the rectangle (face) with the largest area.
            rect = rects[0]
        except IndexError:
            rect = None
        if rect:
            prev_idx = 0
        if not rect:
            # No detection: reuse a sufficiently recent (< PREV_MAX frames) result.
            if prev_face is not None and prev_idx < PREV_MAX:
                rect = prev_face
                prev_idx += 1
        if rect:  # a face is available (possibly reused from prev_face)
            prev_face = rect  # remember for the next frame
            # shape = get_shape(predictor, gray, rect)
            draw_dlib_rect(frame, rect)
            # BUGFIX: the vertical center is (top + bottom) / 2; the original
            # computed `top + bottom / 2`, placing the overlay too low.
            frame_x = int((rect.right() + rect.left()) / 2)
            frame_y = int((rect.top() + rect.bottom()) / 2)
            cv2.circle(frame, (frame_x, frame_y), 5, (0, 255, 0), -1)
            dx = (frame_x - mask_x)
            dy = (frame_y - mask_y)
            # Paste the mask so that its center sits on the face center.
            frame[int(dy):int(dy + mask_h), int(dx):int(dx + mask_w)] = mask
        cv2.imshow("Frame", frame)  # display the frame
        # Press 'q' to quit.
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break
    cv2.destroyAllWindows()
    vs.stop()

if __name__ == '__main__':
    main()
| [
"32605822+JunhoYeo@users.noreply.github.com"
] | 32605822+JunhoYeo@users.noreply.github.com |
91f7e272e123bcc82effd0aa0590229161bc20a0 | 177d7066f6a0326ed937a56174d7e2241653929a | /Tree&Graphs/lc733.py | e1405eccb1370ebee80dc19fea3328286363cdba | [] | no_license | jasonusaco/Leetcode-Practice | 276bcdb62b28806b3d297338882f4b1eef56cc13 | 91dc73202eb9952a6064013ef4ed20dfa4137c01 | refs/heads/master | 2020-07-06T08:29:09.419062 | 2019-10-10T01:43:03 | 2019-10-10T01:43:03 | 202,955,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 571 | py | class Solution:
def floodFill(self, image, sr, sc, newColor):
R=len(image)
C=len(image[0])
oldColor=image[sr][sc]
if oldColor==newColor:
return image
def dfs(r,c):
if image[r][c]==oldColor:
image[r][c]=newColor
if r>=1:
dfs(r-1,c)
if r<R-1:
dfs(r+1,c)
if c>=1:
dfs(r,c-1)
if c<C-1:
dfs(r,c+1)
dfs(sr,sc)
return image
| [
"yangyx@raysdata.com"
] | yangyx@raysdata.com |
323874b2e2f36c12b541fc62524c7fcb8b55acf7 | e1fada3a9846a5593e3d3d2fdc32b23b832e38b4 | /otx/mpa/seg/stage.py | d590b95c4f69f193b4c83ac34f749ec3408f023a | [
"Apache-2.0"
] | permissive | GalyaZalesskaya/openvino_training_extensions | fd1ebb189900008b16b85568449e5c62d8edbad5 | 6116639caeff100b06a6c10a96c7e7f5951f20c7 | refs/heads/develop | 2023-09-03T19:32:44.702497 | 2023-03-15T06:48:24 | 2023-03-15T06:48:24 | 202,568,309 | 0 | 0 | Apache-2.0 | 2019-10-28T16:16:27 | 2019-08-15T15:41:59 | Python | UTF-8 | Python | false | false | 5,579 | py | # Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
from mmcv import ConfigDict
from otx.algorithms.segmentation.adapters.mmseg.utils.builder import build_segmentor
from otx.mpa.stage import Stage
from otx.mpa.utils.config_utils import recursively_update_cfg
from otx.mpa.utils.logger import get_logger
logger = get_logger()
class SegStage(Stage):
MODEL_BUILDER = build_segmentor
def configure(self, model_cfg, model_ckpt, data_cfg, training=True, **kwargs):
"""Create MMCV-consumable config from given inputs"""
logger.info(f"configure!: training={training}")
cfg = self.cfg
self.configure_model(cfg, model_cfg, training, **kwargs)
self.configure_ckpt(cfg, model_ckpt, kwargs.get("pretrained", None))
self.configure_data(cfg, training, data_cfg)
self.configure_task(cfg, training, **kwargs)
self.configure_hook(cfg)
return cfg
def configure_model(self, cfg, model_cfg, training, **kwargs):
if model_cfg:
if hasattr(model_cfg, "model"):
cfg.merge_from_dict(model_cfg._cfg_dict)
else:
raise ValueError(
"Unexpected config was passed through 'model_cfg'. "
"it should have 'model' attribute in the config"
)
cfg.model_task = cfg.model.pop("task", "segmentation")
if cfg.model_task != "segmentation":
raise ValueError(f"Given model_cfg ({model_cfg.filename}) is not supported by segmentation recipe")
# OV-plugin
ir_model_path = kwargs.get("ir_model_path")
if ir_model_path:
def is_mmov_model(k, v):
if k == "type" and v.startswith("MMOV"):
return True
return False
ir_weight_path = kwargs.get("ir_weight_path", None)
ir_weight_init = kwargs.get("ir_weight_init", False)
recursively_update_cfg(
cfg,
is_mmov_model,
{"model_path": ir_model_path, "weight_path": ir_weight_path, "init_weight": ir_weight_init},
)
def configure_data(self, cfg, training, data_cfg, **kwargs): # noqa: C901
# Data
if data_cfg:
cfg.merge_from_dict(data_cfg)
# Dataset
super().configure_data(cfg, training, **kwargs)
src_data_cfg = Stage.get_data_cfg(cfg, "train")
for mode in ["train", "val", "test"]:
if src_data_cfg.type == "MPASegDataset" and cfg.data.get(mode, False):
if cfg.data[mode]["type"] != "MPASegDataset":
# Wrap original dataset config
org_type = cfg.data[mode]["type"]
cfg.data[mode]["type"] = "MPASegDataset"
cfg.data[mode]["org_type"] = org_type
def configure_task(self, cfg, training, **kwargs):
"""Adjust settings for task adaptation"""
if cfg.get("task_adapt", None):
logger.info(f"task config!!!!: training={training}")
task_adapt_op = cfg["task_adapt"].get("op", "REPLACE")
# Task classes
self.configure_classes(cfg, task_adapt_op)
# Ignored mode
self.configure_ignore(cfg)
def configure_classes(self, cfg, task_adapt_op):
# Task classes
org_model_classes = self.get_model_classes(cfg)
data_classes = self.get_data_classes(cfg)
if "background" not in org_model_classes:
org_model_classes = ["background"] + org_model_classes
if "background" not in data_classes:
data_classes = ["background"] + data_classes
# Model classes
if task_adapt_op == "REPLACE":
if len(data_classes) == 1: # 'background'
model_classes = org_model_classes.copy()
else:
model_classes = data_classes.copy()
elif task_adapt_op == "MERGE":
model_classes = org_model_classes + [cls for cls in data_classes if cls not in org_model_classes]
else:
raise KeyError(f"{task_adapt_op} is not supported for task_adapt options!")
cfg.task_adapt.final = model_classes
cfg.model.task_adapt = ConfigDict(
src_classes=org_model_classes,
dst_classes=model_classes,
)
# Model architecture
if "decode_head" in cfg.model:
decode_head = cfg.model.decode_head
if isinstance(decode_head, dict):
decode_head.num_classes = len(model_classes)
elif isinstance(decode_head, list):
for head in decode_head:
head.num_classes = len(model_classes)
# For SupConDetCon
if "SupConDetCon" in cfg.model.type:
cfg.model.num_classes = len(model_classes)
# Task classes
self.org_model_classes = org_model_classes
self.model_classes = model_classes
def configure_ignore(self, cfg):
    """Switch the FCN decode head to the ignore-aware incremental loss.

    Only acts when the config requests ``ignore`` mode and the model uses
    a plain ``FCNHead``; that head is swapped for ``CustomFCNHead`` with a
    ``CrossEntropyLossWithIgnore`` decode loss.
    """
    if not cfg.get("ignore", False):
        return
    ignore_loss = ConfigDict(
        type="CrossEntropyLossWithIgnore",
        use_sigmoid=False,
        loss_weight=1.0,
    )
    if "decode_head" in cfg.model:
        head = cfg.model.decode_head
        if head.type == "FCNHead":
            head.type = "CustomFCNHead"
            head.loss_decode = ignore_loss
| [
"noreply@github.com"
] | GalyaZalesskaya.noreply@github.com |
4c19f566aefa3011a9090945688d333dec852953 | d2e4206ce78451b08bc742e4376316077236e418 | /RemoveAllAdjacentDuplicatesinStringGreaterThanEqualToK.py | 5650df59145581dd769a0da1af47d40e884cdfb9 | [] | no_license | coolsgupta/leetcode | cb25a62999bc0fe20fd7b250c5056c4b6cc3bdca | 64ad78da317c783ffc68357524daa38be0074417 | refs/heads/master | 2023-04-20T11:26:25.638424 | 2021-05-12T05:04:56 | 2021-05-12T05:04:56 | 238,100,015 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 679 | py | class Solution:
def removeDuplicates(self, s: str, k: int) -> str:
    """Delete every maximal run of identical adjacent characters whose
    length is at least ``k``, merging neighbours that become adjacent.

    Note: unlike LeetCode 1209 (remove exactly k), a qualifying run is
    removed *entirely*, e.g. s="aaaa", k=3 -> "".

    Bug fix: the original indexed ``stack[-1]`` after the loop without
    checking for emptiness, raising IndexError on an empty string.
    """
    # Stack of [char, run_length]; invariant: any buried entry has
    # run_length < k (a finished long run is popped before being covered).
    stack = []
    for ch in s:
        if stack and stack[-1][0] != ch and stack[-1][1] >= k:
            # The run just ended and is long enough: drop it so ``ch``
            # can merge with whatever run it exposes underneath.
            stack.pop()
        if stack and stack[-1][0] == ch:
            stack[-1][1] += 1
        else:
            stack.append([ch, 1])
    # The trailing run may also qualify for removal.
    if stack and stack[-1][1] >= k:
        stack.pop()
    return ''.join(ch * count for ch, count in stack)
| [
"cool.sgupta@gmail.com"
] | cool.sgupta@gmail.com |
cc357f6f1e4bea37c08347ba9e8dad0df543adc7 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_1_neat/16_0_1_aleph5381_a.py | 92a8b86a7abbd771359c6c39ae5adc41704a46a0 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 661 | py | #!/usr/bin/env python3
import sys

# Read one raw line from stdin (Code Jam input format).
rl = lambda: sys.stdin.readline()

# Number of test cases, given on the first line of the input.
T = int(rl())
def solve(casei):
    """Solve one "Counting Sheep" case and print its answer.

    Reads N from stdin. Bleatrix counts N, 2N, 3N, ... and falls asleep
    once every decimal digit 0-9 has appeared in some multiple. Prints
    the last multiple named, or INSOMNIA when N == 0 (no digit is ever
    seen).
    """
    line = rl().split()
    N = int(line[0])
    # Bug fix: the original tested "N is 0" -- identity, not equality --
    # which only works by accident of CPython's small-int caching.
    if N == 0:
        print("Case #{}: INSOMNIA".format(casei))
        return
    seen = set()  # decimal digits observed so far, as characters
    x = 0
    while len(seen) < 10:
        x += N
        seen.update(str(x))
    print("Case #{}: {}".format(casei, x))
# Solve every case; output is numbered starting from 1.
for i in range(1, T+1):
    solve(i)
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
f9e8a99edebd2f573d6d2be807df5f607a9f8f7c | af4abf0a22db1cebae466c56b45da2f36f02f323 | /storage/fase2/team14/storage/Blockchain.py | e165b05dd9bb4ba6701acce857ef42272e491acf | [
"MIT"
] | permissive | joorgej/tytus | 0c29408c09a021781bd3087f419420a62194d726 | 004efe1d73b58b4b8168f32e01b17d7d8a333a69 | refs/heads/main | 2023-02-17T14:00:00.571200 | 2021-01-09T00:48:47 | 2021-01-09T00:48:47 | 322,429,634 | 3 | 0 | MIT | 2021-01-09T00:40:50 | 2020-12-17T22:40:05 | Python | UTF-8 | Python | false | false | 5,175 | py | import json
import hashlib
import os
import csv
def turn_on_safe_mode(database, table):
    """Create an empty blockchain ledger file for the given table."""
    path = 'data/info/safeModeTables/' + database + table + '.json'
    with open(path, 'w') as ledger:
        json.dump({}, ledger, indent=4)
def turn_off_safe_mode(database, table):
    """Delete the table's blockchain ledger file."""
    path = 'data/info/safeModeTables/' + database + table + '.json'
    os.remove(path)
def concat_register(register):
    """Join every field of a register into one string, ready for hashing."""
    return ''.join(str(field) for field in register)
def generate_hash(string_data):
    """Return the SHA-256 hex digest of the given string."""
    digest = hashlib.sha256()
    digest.update(string_data.encode())
    return digest.hexdigest()
def generate_chain(database, table, registers):
    """Write a brand-new ledger with one chained block per register.

    Each block stores its data, the previous block's hash, its own hash
    and a clean status flag (0). The genesis predecessor is 64 zeros.
    """
    chain = {}
    previous = '0' * 64  # genesis predecessor: 64 zero hex digits
    for block_id, register in enumerate(registers, start=1):
        block_hash = generate_hash(concat_register(register))
        chain[block_id] = {'blockId': block_id, 'data': register,
                           'previous': previous, 'hash': block_hash, 'status': 0}
        previous = block_hash
    with open('data/info/safeModeTables/' + database + table + '.json', 'w') as ledger:
        json.dump(chain, ledger, indent=4)
def update_block(database, table, newRegister, oldRegister):
    """Replace the block whose hash matches ``oldRegister`` and flag it.

    The block's hash is recomputed but the next block's ``previous`` link
    is deliberately left untouched, so the chain records the tampering
    (status is set to 1).
    """
    path = 'data/info/safeModeTables/' + database + table + '.json'
    old_hash = generate_hash(concat_register(oldRegister))
    new_hash = generate_hash(concat_register(newRegister))
    with open(path, 'r') as ledger:
        chain = json.load(ledger)
    for block_id in chain:
        block = chain[block_id]
        if block['hash'] == old_hash:
            block['data'] = newRegister
            block['hash'] = new_hash
            block['status'] = 1  # mark as modified
            break
    with open(path, 'w') as ledger:
        json.dump(chain, ledger, indent=4)
def delete_block(database, table, register):
    """Drop the first block whose hash matches the given register."""
    path = 'data/info/safeModeTables/' + database + table + '.json'
    target = generate_hash(concat_register(register))
    with open(path) as ledger:
        chain = json.load(ledger)
    for block_id in chain:
        if chain[block_id]['hash'] == target:
            del chain[block_id]
            break
    with open(path, 'w') as ledger:
        json.dump(chain, ledger, indent=4)
def insert_block(database, table, register):
    """Append a new block to the end of the table's ledger.

    The new block id is one past the last key (JSON keys are strings, so
    the last key is parsed back to an int); its ``previous`` field links
    to the last block's hash, or 64 zeros for an empty chain.
    """
    path = 'data/info/safeModeTables/' + database + table + '.json'
    with open(path) as ledger:
        chain = json.load(ledger)
    if len(chain) == 0:
        previous = '0' * 64
        block_id = 1
    else:
        last_id = int(list(chain.keys())[-1])
        previous = chain[str(last_id)]['hash']
        block_id = last_id + 1
    block_hash = generate_hash(concat_register(register))
    chain[block_id] = {'blockId': block_id, 'data': register,
                       'previous': previous, 'hash': block_hash, 'status': 0}
    with open(path, 'w') as ledger:
        json.dump(chain, ledger, indent=4)
def chartBlockchain(database, table):
    """Render the table's ledger as a Graphviz diagram and open the PNG.

    Blocks are drawn green until the first block that fails verification
    (status == 1 or a broken ``previous`` link); from there on every
    block is drawn red.
    NOTE(review): relies on the Graphviz ``dot`` binary being on PATH,
    and the final ``os.system('blockchain.png')`` presumably opens the
    image via Windows file association -- confirm target platform.
    """
    blockchain = None
    with open('data/info/safeModeTables/' + database + table + '.json') as file:
        blockchain = json.load(file)
    file = open('blockchain.dot', 'w')
    file.write('digraph blockchain {\n')
    file.write('rankdir=LR;\n')
    file.write('node[shape=box]\n')
    color = '#DCF0C2'  # green: chain intact so far
    previous = '0000000000000000000000000000000000000000000000000000000000000000'
    if len(blockchain) > 0:
        for i in blockchain.values():
            # Once one block is tampered/broken, all later blocks turn red.
            if color == '#DCF0C2' and (i['status'] == 1 or i['previous'] != previous):
                color = '#F3ABAB'
            file.write(str(i['blockId']) + '[label=<')
            file.write('<TABLE BORDER="0" BGCOLOR=' + '"' + color + '" ' +
                       'CELLBORDER="1" CELLSPACING="0" CELLPADDING="4">')
            file.write('<TR><TD>' + 'Bloque: ' + '</TD><TD>' + '# ' + str(i['blockId']) + '</TD></TR>')
            file.write('<TR><TD>' + 'Datos: ' + '</TD><TD>' + str(i['data']) + '</TD></TR>')
            file.write('<TR><TD>' + 'Anterior: ' + '</TD><TD>' + str(i['previous']) + '</TD></TR>')
            file.write('<TR><TD>' + 'Hash: ' + '</TD><TD>' + str(i['hash']) + '</TD></TR>')
            file.write('</TABLE>')
            file.write('>, ];')
            previous = i['hash']
        # Draw an edge between each pair of consecutive blocks.
        count = 0
        nodes = list(blockchain.keys())
        for i in nodes:
            if count + 1 < len(nodes):
                file.write(nodes[count] + '->' + nodes[count + 1] + '\n')
            count += 1
    file.write('}')
    file.close()
    os.system("dot -Tpng blockchain.dot -o blockchain.png")
    os.system('blockchain.png')
def insert_block_CSV(results, file, database, table):
    """Insert a block for every CSV row whose matching result code is 0.

    ``results`` is indexed positionally against the rows of ``file``.
    """
    with open(file, 'r') as source:
        for row_index, row in enumerate(csv.reader(source, delimiter=',')):
            if results[row_index] == 0:
                insert_block(database, table, row)
| [
"noreply@github.com"
] | joorgej.noreply@github.com |
e40541251a9d5577e3a17f6424b708ed485084f5 | 34ef54c04b369a6161c6f8a649868a47122a2d89 | /.venv/Lib/site-packages/astroid/brain/brain_hashlib.py | a6582de9504bc79b6cb428620bbacdf4b4b44262 | [
"MIT"
] | permissive | abner-lucas/tp-cruzi-db | f70ad269c50a2db24debd1455daeddaa2ebd3923 | 595c5c46794ae08a1f19716636eac7430cededa1 | refs/heads/bioinformatica | 2023-05-18T23:23:23.458394 | 2021-06-14T02:13:17 | 2021-06-14T02:13:17 | 351,864,250 | 2 | 2 | MIT | 2021-06-13T19:52:18 | 2021-03-26T17:40:20 | Python | UTF-8 | Python | false | false | 2,400 | py | # Copyright (c) 2016, 2018, 2020 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2018 David Poirier <david-poirier-csn@users.noreply.github.com>
# Copyright (c) 2018 wgehalo <wgehalo@gmail.com>
# Copyright (c) 2018 Ioana Tagirta <ioana.tagirta@gmail.com>
# Copyright (c) 2020-2021 hippo91 <guillaume.peillex@gmail.com>
# Copyright (c) 2020 David Gilman <davidgilman1@gmail.com>
# Copyright (c) 2021 Pierre Sassoulas <pierre.sassoulas@gmail.com>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
import sys
import astroid
PY36 = sys.version_info >= (3, 6)
def _hashlib_transform():
    """Build an astroid module stubbing the classes ``hashlib`` exposes.

    The real hash objects are implemented in C, so astroid cannot infer
    them; instead we synthesise one stub class per algorithm exposing the
    usual hash-object API (digest/hexdigest/update/copy and the
    name/block_size/digest_size properties).
    """
    signature = "value=''"
    # Source template for one stub hash class; %-formatted per algorithm.
    # astroid.parse dedents the code, so the uniform indent is harmless.
    template = """
    class %(name)s(object):
      def __init__(self, %(signature)s): pass
      def digest(self):
        return %(digest)s
      def copy(self):
        return self
      def update(self, value): pass
      def hexdigest(self):
        return ''
      @property
      def name(self):
        return %(name)r
      @property
      def block_size(self):
        return 1
      @property
      def digest_size(self):
        return 1
    """
    algorithms_with_signature = dict.fromkeys(
        ["md5", "sha1", "sha224", "sha256", "sha384", "sha512"], signature
    )
    if PY36:
        # Python 3.6 added blake2/sha3/shake; the blake2 constructors take
        # a much richer keyword-only signature.
        blake2b_signature = "data=b'', *, digest_size=64, key=b'', salt=b'', \
person=b'', fanout=1, depth=1, leaf_size=0, node_offset=0, \
node_depth=0, inner_size=0, last_node=False"
        blake2s_signature = "data=b'', *, digest_size=32, key=b'', salt=b'', \
person=b'', fanout=1, depth=1, leaf_size=0, node_offset=0, \
node_depth=0, inner_size=0, last_node=False"
        new_algorithms = dict.fromkeys(
            ["sha3_224", "sha3_256", "sha3_384", "sha3_512", "shake_128", "shake_256"],
            signature,
        )
        algorithms_with_signature.update(new_algorithms)
        algorithms_with_signature.update(
            {"blake2b": blake2b_signature, "blake2s": blake2s_signature}
        )
    # Concatenate all stub classes into one synthetic module source.
    classes = "".join(
        template % {"name": hashfunc, "digest": 'b""', "signature": signature}
        for hashfunc, signature in algorithms_with_signature.items()
    )
    return astroid.parse(classes)
astroid.register_module_extender(astroid.MANAGER, "hashlib", _hashlib_transform)
| [
"abnerlucas.cad@gmail.com"
] | abnerlucas.cad@gmail.com |
c66fb5735b664f20d647f3bc49bc6ba93a005a3d | f6703b2afca284bf75e0dbf8f61d77e5251f905c | /euler55.py | 8409d725d3672ffc5ec2ce87dddc8a3c639332e4 | [] | no_license | rwieckowski/project-euler-python | 2a7aa73670b4684f076ad819bfc464aa0778f96c | be9a455058b20adfd32c814effd8753cc9d39890 | refs/heads/master | 2021-01-10T21:10:44.875335 | 2015-06-23T13:29:58 | 2015-06-23T13:29:58 | 37,920,684 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,406 | py | """
If we take 47 reverse and add 47 74 121 which is palindromic
Not all numbers produce palindromes so quickly For example
349 943 1292
1292 2921 4213
4213 3124 7337
That is 349 took three iterations to arrive at a palindrome
Although no one has proved it yet it is thought that some numbers like
196 never produce a palindrome A number that never forms a palindrome
through the reverse and add process is called a Lychrel number Due to
the theoretical nature of these numbers and for the purpose of this
problem we shall assume that a number is Lychrel until proven
otherwise In addition you are given that for every number below ten
thousand it will either i become a palindrome in less than fifty
iterations or ii no one with all the computing power that exists has
managed so far to map it to a palindrome In fact 10677 is the first
number to be shown to require over fifty iterations before producing a
palindrome 4668731596684224866951378664 53 iterations 28digits
Surprisingly there are palindromic numbers that are themselves Lychrel
numbers the first example is 4994
How many Lychrel numbers are there below tenthousand
NOTE Wording was modified slightly on 24 April 2007 to emphasise the
theoretical nature of Lychrel numbers
"""
def euler55():
    """Count the Lychrel numbers below ten thousand.

    A number is assumed Lychrel unless some chain of at most fifty
    reverse-and-add steps reaches a palindrome.

    >>> euler55()
    249
    """
    def _is_lychrel(n):
        # Apply up to 50 reverse-and-add steps; a palindrome means not Lychrel.
        for _ in range(50):
            n += int(str(n)[::-1])
            digits = str(n)
            if digits == digits[::-1]:
                return False
        return True

    return sum(1 for n in range(1, 10000) if _is_lychrel(n))
pass
if __name__ == "__main__":
import doctest
doctest.testmod() | [
"rwieckowski@ivmx.pl"
] | rwieckowski@ivmx.pl |
c40ee3ce0fccd75e6378521c50021da41918068a | b02a5015ecc61414834c4b24e5f33168eb99070a | /CCscripts/MakeAgreementGraph.py | e6b906884fb9acab5b5c0c84e68a61196f62ba47 | [
"MIT"
] | permissive | mrvollger/SDA | f1aa8edf9989125d7e0c0f6ae159bca495915826 | 3d5e9ec8d1e7ac97121c33c6be80d635392631cf | refs/heads/master | 2023-05-13T05:24:54.665854 | 2023-05-07T23:40:25 | 2023-05-07T23:40:25 | 101,452,926 | 29 | 5 | MIT | 2019-11-21T18:08:13 | 2017-08-26T00:58:01 | Python | UTF-8 | Python | false | false | 3,729 | py | #!/usr/bin/env python
import argparse
import ABPUtils
import numpy as np
import sys
import networkx as nx
import matplotlib.pyplot as plt
# Command-line interface: SNV matrix in, per-read score table out, with an
# optional agreement graph written in gml/gexf/graphml format.
ap = argparse.ArgumentParser(description="Sort by haplotype")
ap.add_argument("mat", help="snv matrix file")
ap.add_argument("--out", help="Output file", default="/dev/stdout")
ap.add_argument("--graph", help="Write graph to this file.", default=None)
ap.add_argument("--alph", help="Alphabet to use, 3 characters: ref,alt,gap", default='.1n')
ap.add_argument("--cov", help="Average coverage.", type=float, default=60)
ap.add_argument("--score_cutoff", help="Prune connections below this score.",type=int, default=15)
#args = ap.parse_args('assembly.consensus.fragments.snv.mat.categorized')
args = ap.parse_args()
# Decode the 3-character alphabet (ref, alt, gap) and open input/output.
alph = list(args.alph)
mat = open(args.mat)
outFile = open(args.out, 'w')
#freqLine = mat.readline()
#freq = np.array(freqLine.split())
#print freq
gtList = []
groups = {}
index = 0
groupList = []
coverage = []
# gt: reads x sites genotype matrix; groupList/groups: per-read grouping.
(gt, readNames, groupList, groups) = ABPUtils.ReadGenotypeMatrix(mat)
altList = []
refList = []
# Single characters standing for reference, alternate, and gap calls.
r=alph[0]
a=alph[1]
g=alph[2]
(altList, refList) = ABPUtils.GetRefAltLists(gt)
# Per-site coverage: number of reads with a non-gap call at each column.
coverage = np.array([len(np.where(gt[:,i]!=g)[0]) for i in range(0,gt.shape[1])])
ngt = len(gt)
# Compare distance to members in the group
allGroups = np.array(groups.keys())
allScores = []
nScores = []
scoreIndices = []
# For every read i, score its agreement with every other read j:
# matches on shared alt sites count +1, ref/alt conflicts count -1.
# Only the best min(coverage, --cov) scores per read are recorded.
for i in range(0,ngt):
    innerMat = []   # NOTE(review): unused, kept as-is
    innerMis = []   # NOTE(review): unused, kept as-is
    scores = []
    for j in range(0,ngt):
        if (j == i):
            continue
        # Shared alternate alleles support the two reads being one haplotype.
        nMatch = len(np.intersect1d(altList[i],altList[j], assume_unique=True))
        # Ref-vs-alt disagreements at the same sites argue against it.
        nMis = len(np.intersect1d(altList[i],refList[j], assume_unique=True))+\
            len(np.intersect1d(refList[i],altList[j], assume_unique=True))
        scores.append([nMatch - nMis, j])
    minAlt = 0   # NOTE(review): unused, kept as-is
    minRef = 0   # NOTE(review): unused, kept as-is
    # Sort pairs best-first by (score, partner index).
    scoreMat = np.array(sorted(scores, reverse=True))
    if (len(altList[i]) > 0 and len(refList[i]) > 0):
        (start,end) = ABPUtils.GetRange(gt[i], g)
        readCov = coverage[start]
        # Keep at most --cov best partners; pad with the minimum kept score.
        bestN = min(readCov, int(args.cov))
        readScores = scoreMat[0:bestN,0]
        minScore = min(readScores)
        scores = np.array([minScore]*int(args.cov))
        scores[0:bestN] = readScores
        outFile.write("\t".join([str(x) for x in scores]) + "\n")
        # record this in a matrix
        scoreIndices.append(i)
        nScores.append(bestN)
        allScores.append(scoreMat[0:bestN,:])
def TripleToHex(x):
    # Convert an RGBA tuple of 0-1 floats into a "#rrggbbaa" hex colour string.
    channels = [int(component * 255) for component in x[:4]]
    return "#" + "".join("{:02x}".format(value) for value in channels)
def TripleToTuple(x):
    # Render the RGB channels of a 0-1 float tuple as a "r,g,b" byte string.
    red, green, blue = (int(component * 255) for component in x[:3])
    return "%d,%d,%d" % (red, green, blue)
# Optionally emit the agreement graph: one node per read (coloured by its
# group), one edge per retained partner whose score clears --score_cutoff.
# NOTE(review): this script is Python 2 (print statements below).
if (args.graph is not None):
    g = nx.Graph()  # NOTE: shadows the gap character `g` defined above
    nColors = len(groups.keys())
    groupNames = groups.keys()
    cm = plt.get_cmap("Set1")
    # Spread the group colours evenly across the Set1 colormap.
    colors = [TripleToHex(cm(int(i*float(float(cm.N)/nColors)))) for i in range(0,len(groupNames))]
    print colors
    groupCM = { groupNames[i]: colors[i] for i in range(0,len(colors))}
    print groupCM
    for i in range(0,ngt):
        g.add_node(i, color=groupCM[groupList[i]])
    # Walk scoreIndices (sorted read ids that produced scores) in lockstep
    # with the node loop, adding edges for each read's best partners.
    idx = 0
    for i in range(0,ngt):
        if (idx >= len(scoreIndices)):
            break
        if (scoreIndices[idx] != i):
            continue
        for j in range(0,len(allScores[idx])):
            # Scores are sorted descending, so stop at the first weak one.
            if (allScores[idx][j][0] < args.score_cutoff):
                break
            g.add_edge(i,allScores[idx][j][1])
        idx+=1
    # Pick the output writer from the file extension.
    if (args.graph.find("gml") >= 0):
        nx.write_gml(g, args.graph)
    elif (args.graph.find("gexf") >= 0):
        nx.write_gexf(g, args.graph)
    elif (args.graph.find("graphml") >= 0):
        nx.write_graphml(g, args.graph)
| [
"mrvollger@gmail.com"
] | mrvollger@gmail.com |
64683810f9c43df3333a32d6404f1f6af85bc005 | f1c20d0836f4815b81c895ffe22a29005db3746d | /backend/main/settings/base.py | ed93056e2f01bdbdcf212ab4fe28dc7d14f35e97 | [] | no_license | pavelm2007/leadersofdigital_2020_04 | 6ceacf0858ea46bd73c5a0e0ab120cae802e85bd | 0132d1b3361518b109b0632daaf13ed8e849192d | refs/heads/main | 2023-04-04T21:12:54.890040 | 2021-04-17T20:37:02 | 2021-04-17T20:37:02 | 358,649,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,933 | py | # https://docs.djangoproject.com/en/1.10/ref/settings/
import os
from decouple import config # noqa
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
def base_dir_join(*args):
return os.path.join(BASE_DIR, *args)
SITE_ID = 1
SECURE_HSTS_PRELOAD = True
DEBUG = True
ADMINS = (("Admin", "foo@example.com"),)
AUTH_USER_MODEL = "users.User"
ALLOWED_HOSTS = []
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django_js_reverse",
"webpack_loader",
"import_export",
"apps.common",
"apps.users",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "main.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [base_dir_join("templates")],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"apps.common.context_processors.sentry_dsn",
"apps.common.context_processors.commit_sha",
],
},
},
]
WSGI_APPLICATION = "main.wsgi.application"
AUTH_PASSWORD_VALIDATORS = [
{"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator", },
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator", },
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator", },
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator", },
]
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATICFILES_DIRS = (base_dir_join("../frontend"),)
# Webpack
WEBPACK_LOADER = {
"DEFAULT": {
"CACHE": False, # on DEBUG should be False
"STATS_FILE": base_dir_join("../webpack-stats.json"),
"POLL_INTERVAL": 0.1,
"IGNORE": [".+\.hot-update.js", ".+\.map"],
}
}
# Celery
CELERY_ACCEPT_CONTENT = ["json"]
CELERY_TASK_SERIALIZER = "json"
CELERY_RESULT_SERIALIZER = "json"
CELERY_ACKS_LATE = True
CELERY_TIMEZONE = TIME_ZONE
# Sentry
SENTRY_DSN = config("SENTRY_DSN", default="")
COMMIT_SHA = config("HEROKU_SLUG_COMMIT", default="")
| [
"pavelm2007@yandex.ru"
] | pavelm2007@yandex.ru |
64649915912e14ca161b5972b75805aaa8f7bc29 | c92b6c0b59d25018de5b51c6d88f4764e5b713d7 | /ligo/skymap/conftest.py | 102ff41df148897c8cdc94c7f0d69d5122e5549c | [] | no_license | lpsinger/ligo.skymap | 9ecb3480859a3bc7e09332118aa151b47cf50dc8 | 35d451804acb859141a39296f8d6f760802fc78c | refs/heads/main | 2023-08-30T21:01:00.223367 | 2023-08-21T14:03:13 | 2023-08-21T14:03:13 | 124,963,286 | 24 | 18 | null | 2023-07-08T12:53:23 | 2018-03-12T23:17:14 | Python | UTF-8 | Python | false | false | 2,321 | py | # This file is used to configure the behavior of pytest when using the Astropy
# test infrastructure. It needs to live inside the package in order for it to
# get picked up when running the tests inside an interpreter using
# packagename.test
import warnings
from astropy.version import version as astropy_version
import pytest
try:
from pytest_astropy_header.display import PYTEST_HEADER_MODULES, TESTED_VERSIONS
ASTROPY_HEADER = True
except ImportError:
ASTROPY_HEADER = False
def pytest_configure(config):
if ASTROPY_HEADER:
config.option.astropy_header = True
# Customize the following lines to add/remove entries from the list of
# packages for which version numbers are displayed when running the tests.
PYTEST_HEADER_MODULES.pop('Pandas', None)
PYTEST_HEADER_MODULES['astropy'] = 'astropy'
PYTEST_HEADER_MODULES['astropy-healpix'] = 'astropy_healpix'
PYTEST_HEADER_MODULES['healpy'] = 'healpy'
PYTEST_HEADER_MODULES['reproject'] = 'reproject'
from . import __version__
packagename = 'ligo.skymap'
TESTED_VERSIONS[packagename] = __version__
# Uncomment the last two lines in this block to treat all DeprecationWarnings as
# exceptions. For Astropy v2.0 or later, there are 2 additional keywords,
# as follow (although default should work for most cases).
# To ignore some packages that produce deprecation warnings on import
# (in addition to 'compiler', 'scipy', 'pygments', 'ipykernel', and
# 'setuptools'), add:
# modules_to_ignore_on_import=['module_1', 'module_2']
# To ignore some specific deprecation warning messages for Python version
# MAJOR.MINOR or later, add:
# warnings_to_ignore_by_pyver={(MAJOR, MINOR): ['Message to ignore']}
# from astropy.tests.helper import enable_deprecations_as_exceptions # noqa
# enable_deprecations_as_exceptions()
@pytest.fixture(autouse=True)
def ignore_unclosed_file_warnings():
"""Ignore unclosed file warnings.
Many of the command-line tools in :mod:`ligo.skymap.tool` use
:class:`arparse.FileType` and therefore leave files opened. Suppress
warnings about unclosed files so that other more interesting warning types
are more noticable.
"""
warnings.filterwarnings('ignore', 'unclosed file .*', ResourceWarning)
| [
"leo.singer@ligo.org"
] | leo.singer@ligo.org |
dedd346d54e7685a9a5faf73d1ec612f64bd2a8b | 02e5b1240db2ef04b4f8b661a9ac4ce060144d74 | /experiments/debug_algorithm_comparison.py | a478002d69b94f10e3cab276763d6d6087c35cfb | [
"MIT"
] | permissive | jayeshchoudhari/pyhawkes | b3b143a5040730826c23a9b3703159dbeb9bf21d | f4b0e6e3ce7f74e647f0ed2254ea334c22d6e82b | refs/heads/master | 2021-06-12T12:55:54.740142 | 2017-03-27T06:47:16 | 2017-03-27T06:47:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,309 | py | """
Compare the various algorithms on a synthetic dataset.
"""
import cPickle
import os
import copy
import gzip
import numpy as np
# Use the Agg backend in running on a server without the DISPLAY variable
if "DISPLAY" not in os.environ:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# import brewer2mpl
# colors = brewer2mpl.get_map("Set1", "Qualitative", 9).mpl_colors
# goodcolors = np.array([0,1,2,4,6,7,8])
# colors = np.array(colors)[goodcolors]
import harness
def load_data(data_path, test_path):
    """Load the pickled training spikes + true model and the held-out test spikes."""
    with gzip.open(data_path, 'r') as handle:
        S, true_model = cPickle.load(handle)
    with gzip.open(test_path, 'r') as handle:
        S_test, test_model = cPickle.load(handle)
    return S, S_test, true_model
def plot_pred_ll_vs_time(models, results, burnin=0,
                         std_ll=np.nan,
                         true_ll=np.nan):
    """Plot each model's predictive log likelihood against wall-clock time.

    Horizontal reference lines mark the standard Hawkes fit (``std_ll``)
    and the true model (``true_ll``); the first ``burnin`` samples of
    every trace are discarded.
    """
    from hips.plotting.layout import create_figure
    from hips.plotting.colormaps import harvard_colors

    # Make the ICML figure
    fig = create_figure((4,3))
    ax = fig.add_subplot(111)
    col = harvard_colors()
    plt.grid()

    t_start = 0
    t_stop = 0
    for i, (model, result) in enumerate(zip(models, results)):
        plt.plot(result.timestamps[burnin:], result.test_lls[burnin:], lw=2, color=col[i], label=model)

        # Update time limits
        t_start = min(t_start, result.timestamps[burnin:].min())
        t_stop = max(t_stop, result.timestamps[burnin:].max())

    # plt.legend(loc="outside right")

    # Plot the standard Hawkes test ll
    plt.plot([t_start, t_stop], std_ll*np.ones(2), lw=2, color=col[len(models)], label="Std.")

    # Plot the true ll
    plt.plot([t_start, t_stop], true_ll*np.ones(2), '--k', lw=2, label="True")

    ax.set_xlim(t_start, t_stop)
    ax.set_xlabel("time [sec]")
    ax.set_ylabel("Pred. Log Lkhd.")
    plt.show()
def plot_impulse_responses(models, results):
    """Plot each model's last-sample impulse responses in a K x K grid.

    Continuous-time samples provide a time axis alongside the impulses;
    discrete-time samples are plotted against sample index.
    NOTE(review): uses the module-global K set in __main__, so this only
    works when the script is run directly -- confirm before reusing.
    """
    from hips.plotting.layout import create_figure
    from hips.plotting.colormaps import harvard_colors

    # Make the ICML figure
    fig = create_figure((6,6))
    col = harvard_colors()
    plt.grid()

    y_max = 0
    for i, (model, result) in enumerate(zip(models, results)):
        # Use the final posterior sample as the representative fit.
        smpl = result.samples[-1]
        W = smpl.W_effective
        if "continuous" in str(smpl.__class__).lower():
            t, irs = smpl.impulses
            for k1 in xrange(K):
                for k2 in xrange(K):
                    plt.subplot(K,K,k1*K + k2 + 1)
                    plt.plot(t, W[k1,k2] * irs[:,k1,k2], color=col[i], lw=2)
        else:
            irs = smpl.impulses
            for k1 in xrange(K):
                for k2 in xrange(K):
                    plt.subplot(K,K,k1*K + k2 + 1)
                    plt.plot(W[k1,k2] * irs[:,k1,k2], color=col[i], lw=2)
        y_max = max(y_max, (W*irs).max())

    # Share a common y-axis range across all K*K panels.
    for k1 in xrange(K):
        for k2 in xrange(K):
            plt.subplot(K,K,k1*K+k2+1)
            plt.ylim(0,y_max*1.05)
    plt.show()
# def run_comparison(data_path, test_path, output_dir, T_train=None, seed=None):
# """
# Run the comparison on the given data file
# :param data_path:
# :return:
# """
if __name__ == "__main__":
    seed = None
    run = 1
    # Synthetic dataset dimensions: K processes, C latent classes, T bins.
    K = 4
    C = 1
    T = 1000
    T_train = 1000
    T_test = 1000
    data_path = os.path.join("data", "synthetic", "synthetic_K%d_C%d_T%d.pkl.gz" % (K,C,T))
    test_path = os.path.join("data", "synthetic", "synthetic_test_K%d_C%d_T%d.pkl.gz" % (K,C,T_test))
    output_dir = os.path.join("results", "synthetic_K%d_C%d_T%d" % (K,C,T_train), "run%03d" % run)

    # run_comparison(data_path, test_path, output_dir, T_train=T_train, seed=seed)
    if seed is None:
        seed = np.random.randint(2**32)

    print "Setting seed to ", seed
    np.random.seed(seed)

    assert os.path.exists(os.path.dirname(output_dir)), "Output directory does not exist!"

    S, S_test, true_model = load_data(data_path, test_path)

    # If T_train is given, only use a fraction of the dataset
    if T_train is not None:
        S = S[:T_train,:]

    # Use the true basis
    dt, dt_max = true_model.dt, true_model.dt_max
    basis = true_model.basis
    network = true_model.network

    # First fit the standard model (MLE via BFGS); it seeds the Bayesian fits.
    results = []
    output_path = os.path.join(output_dir, "std.pkl.gz")
    std_results = \
        harness.fit_standard_hawkes_model_bfgs(S, S_test, dt, dt_max, output_path,
                                               model_args={"basis": basis, "alpha": 1.0, "beta": 1.0})
    std_model = std_results.samples[0]
    # results.append(std_results)

    # Now fit the Bayesian models with MCMC or VB,
    # initializing with the standard model
    models = [
        "SS-DTH (Gibbs)",
        #"SS-CTH (Gibbs)",
        "MoG-DTH (VB)",
        "MoG-DTH (SVI)"
    ]
    methods = [
        harness.fit_spikeslab_network_hawkes_gibbs,
        #harness.fit_ct_network_hawkes_gibbs,
        harness.fit_network_hawkes_vb,
        harness.fit_network_hawkes_svi
    ]
    # Per-method inference settings; every fit is seeded with the MLE model.
    inf_args = [
        {"N_samples": 3000, "standard_model": std_model},
        #{"N_samples": 1000, "standard_model": std_model},
        {"N_samples": 3000, "standard_model": std_model},
        {"N_samples": 3000, "standard_model": std_model}
    ]
    # Per-method model settings; each fit gets its own copy of the network prior.
    model_args = [
        {"basis": basis, "network": copy.deepcopy(network), "weight_hypers": {"parallel_resampling": False}},
        #{"network": copy.deepcopy(network), "impulse_hypers" : {"mu_0": 0., "lmbda_0": 2.0, "alpha_0": 2.0, "beta_0" : 1.0}},
        {"basis": basis, "network": copy.deepcopy(network)},
        {"basis": basis, "network": copy.deepcopy(network)},
    ]

    assert len(models) == len(methods) == len(inf_args) == len(model_args)

    # Run every (model, method) pair, each writing its own results file.
    for model, method, iargs, margs in zip(models, methods, inf_args, model_args):
        output_path = os.path.join(output_dir, model.lower() + ".pkl.gz")
        results.append(method(S, S_test, dt, dt_max, output_path,
                              model_args=margs,
                              **iargs))

    # Plot the results
    plt.ion()
    plot_pred_ll_vs_time(models, results, burnin=1,
                         std_ll=std_results.test_lls[-1],
                         true_ll=true_model.heldout_log_likelihood(S_test))

    # Plot impulse responses
    # plot_impulse_responses(models, results)
| [
"scott.linderman@gmail.com"
] | scott.linderman@gmail.com |
8f4254578ec4c86546e4c356104dd919b4821bc6 | 6d80ce7a1f44ddf5741fd190ddfe0d9be8e5f162 | /model/recognition_model/MORAN_V2/models/fracPickup.py | c77c4b2472d2cf2ef4c6501bff03d3cb0602d5ac | [
"MIT"
] | permissive | dun933/FudanOCR | dd8830ca4b8ebb08acd31326fcf5aa3c961886a0 | fd79b679044ea23fd9eb30691453ed0805d2e98b | refs/heads/master | 2021-04-03T19:50:47.646099 | 2020-03-16T08:43:59 | 2020-03-16T08:43:59 | 248,391,401 | 1 | 0 | MIT | 2020-03-19T02:23:11 | 2020-03-19T02:23:10 | null | UTF-8 | Python | false | false | 1,448 | py | import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import numpy.random as npr
class fracPickup(nn.Module):
    """Fractional-pickup layer for (N, C, 1, W) feature maps.

    Builds a horizontal sampling grid spanning [-1, 1], optionally swaps
    fractional positions between random neighbouring columns (currently
    disabled: the pickup count is hard-coded to 0), and resamples the
    input with ``grid_sample``.
    """

    def __init__(self, CUDA=True):
        super(fracPickup, self).__init__()
        self.cuda = CUDA

    def forward(self, x):
        shape = x.size()
        assert len(shape) == 4
        assert shape[2] == 1

        num_pickups = 0  # augmentation currently disabled
        row_coord = 1.
        # Evenly spaced horizontal coordinates covering [-1, 1].
        col_coords = np.arange(shape[3]) * 2. / (shape[3] - 1) - 1
        for _ in range(num_pickups):
            pos = int(npr.rand() * len(col_coords))
            if pos <= 0 or pos >= shape[3] - 1:
                continue
            # Blend the chosen column with its left neighbour and swap them.
            beta = npr.rand() / 4.
            mixed_right = beta * col_coords[pos] + (1 - beta) * col_coords[pos - 1]
            mixed_left = beta * col_coords[pos - 1] + (1 - beta) * col_coords[pos]
            # Modified
            col_coords[pos - 1] = mixed_left
            col_coords[pos] = mixed_right

        # Assemble the (N, 1, W, 2) sampling grid expected by grid_sample.
        mesh = np.meshgrid(
            col_coords,
            row_coord,
            indexing='ij'
        )
        grid_np = np.stack(mesh, axis=-1)
        grid_np = np.transpose(grid_np, (1, 0, 2))
        grid_np = np.expand_dims(grid_np, 0)
        grid_np = np.tile(grid_np, [shape[0], 1, 1, 1])
        grid = torch.from_numpy(grid_np).type(x.data.type())
        if self.cuda:
            grid = grid.cuda()
        self.grid = Variable(grid, requires_grad=False)

        return nn.functional.grid_sample(x, self.grid)
| [
"576194329@qq.com"
] | 576194329@qq.com |
0682e73e7996182b5c7e1fc9d9644fdff3829b00 | 11df0f91cb97d974a8097a74a907dadfdf63e5a3 | /plugins/jobs/setup.py | ba8f34b55e76d90d049415c52752673fce8b6d66 | [
"Apache-2.0"
] | permissive | kotfic/girder | 730b8234a51e8428952cf359cd5ddb3ccb992510 | 461faf52288c8fc4936f1e7a2ff08ee5a674f324 | refs/heads/master | 2021-01-15T21:07:53.923485 | 2018-10-24T15:03:53 | 2018-10-24T15:03:53 | 40,503,503 | 0 | 0 | Apache-2.0 | 2018-10-05T14:49:29 | 2015-08-10T20:05:16 | Python | UTF-8 | Python | false | false | 1,867 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from setuptools import setup, find_packages
# perform the install
setup(
    name='girder-jobs',
    version='3.0.0a1',
    description='A general purpose plugin for managing offline jobs.',
    author='Kitware, Inc.',
    author_email='kitware@kitware.com',
    url='http://girder.readthedocs.io/en/latest/plugins.html#jobs',
    license='Apache 2.0',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5'
    ],
    include_package_data=True,
    # Ship every package except the test suite.
    packages=find_packages(exclude=['plugin_tests']),
    zip_safe=False,
    install_requires=['girder>=3.0.0a1'],
    # Girder discovers the plugin class through this entry point.
    entry_points={
        'girder.plugin': [
            'jobs = girder_jobs:JobsPlugin'
        ]
    }
)
| [
"jonathan.beezley@kitware.com"
] | jonathan.beezley@kitware.com |
95acbaf7ca434dbb5ce338d2f69a29098e9a845c | 744096e063ffb4cdb017f60e6dfae410a51c789a | /keras/keras78_iris_cnn.py | 76a91517f44f06c521e9584fdf7f72c5fcb4fbf3 | [] | no_license | elf0508/Study-bit | 59ddab507b02c13a45913c05a4799ff946e63f95 | a773d7643cbb1c0008e7ea01c32615c9e6e3678c | refs/heads/master | 2022-12-31T11:53:44.344693 | 2020-10-16T09:04:01 | 2020-10-16T09:04:01 | 270,950,252 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,088 | py | # 다중 분류
from sklearn.datasets import load_iris
iris = load_iris()
x = iris.data
y = iris.target
print(x.shape) # (150, 4)
print(y.shape) # (150, )
# x : scaler
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(x)
x = scaler.transform(x).reshape(150, 2, 2, 1)
# y : one hot encoding
from keras.utils.np_utils import to_categorical
y = to_categorical(y)
print(y.shape) # (150, 3)
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size =0.8,random_state= 10)
#2. model
from keras.models import Sequential
from keras.layers import Dense, Dropout, Conv2D, MaxPool2D, Flatten
model = Sequential()
model.add(Conv2D(10, (2, 2), input_shape = (2, 2, 1 ), activation = 'relu', padding = 'same'))
model.add(Conv2D(50, (2, 2),activation = 'relu', padding = 'same'))
model.add(Dropout(0.1))
model.add(Conv2D(50, (2, 2),activation = 'relu', padding = 'same'))
model.add(Dropout(0.1))
model.add(Conv2D(50, (2, 2),activation = 'relu', padding = 'same'))
model.add(Dropout(0.1))
model.add(Conv2D(50,(2, 2), activation = 'relu', padding = 'same'))
model.add(Dropout(0.1))
model.add(Conv2D(50, (2, 2),activation = 'relu', padding = 'same'))
model.add(Dropout(0.1))
model.add(Conv2D(50,(2, 2), activation = 'relu', padding = 'same'))
model.add(Dropout(0.1))
model.add(Conv2D(50, (2, 2),activation = 'relu', padding = 'same'))
model.add(Dropout(0.1))
model.add(Conv2D(50, (2, 2),activation = 'relu', padding = 'same'))
model.add(Dropout(0.1))
model.add(Flatten())
model.add(Dense(3, activation = 'softmax'))
# callbacks
from keras.callbacks import EarlyStopping, TensorBoard, ModelCheckpoint
# earlystopping
es = EarlyStopping(monitor = 'val_loss', patience = 50, verbose =1)
# Tensorboard
ts_board = TensorBoard(log_dir = 'graph', histogram_freq= 0,
write_graph = True, write_images=True)
# Checkpoint
modelpath = './model/{epoch:02d}-{val_loss:.4f}.hdf5'
ckecpoint = ModelCheckpoint(filepath = modelpath, monitor = 'val_loss',
save_best_only= True)
#3. compile, fit
model.compile(loss = 'categorical_crossentropy', optimizer = 'adam', metrics = ['acc'])
hist = model.fit(x_train, y_train, epochs =100, batch_size= 64,
validation_split = 0.2, verbose = 2,
callbacks = [es])
# evaluate
loss, acc = model.evaluate(x_test, y_test, batch_size = 64)
print('loss: ', loss )
print('acc: ', acc)
# graph
import matplotlib.pyplot as plt
plt.figure(figsize = (10, 5))
# 1
plt.subplot(2, 1, 1)
plt.plot(hist.history['loss'], c= 'red', marker = '^', label = 'loss')
plt.plot(hist.history['val_loss'], c= 'cyan', marker = '^', label = 'val_loss')
plt.title('loss')
plt.xlabel('epochs')
plt.ylabel('loss')
plt.legend()
# 2
plt.subplot(2, 1, 2)
plt.plot(hist.history['acc'], c= 'red', marker = '^', label = 'acc')
plt.plot(hist.history['val_acc'], c= 'cyan', marker = '^', label = 'val_acc')
plt.title('accuarcy')
plt.xlabel('epochs')
plt.ylabel('acc')
plt.legend()
plt.show() | [
"elf0508@naver.com"
] | elf0508@naver.com |
3a6f59ae3607d35583d9c3f4b8a8aede43a77042 | 1e4d852a59e6f16d70fb05e74f5b8d6e52bbc5d7 | /data_visualization/15/5/rw_visual.py | 074f07253417fcf0ddce0a00d6e446f6e87c938f | [] | no_license | 1000monkeys/probable-invention | 6cb32fae592f7752c77c295a4be2d500e0a55ec9 | adf42f00c32ab7eb165d78dde3703eba3037356d | refs/heads/master | 2021-06-29T17:08:52.810761 | 2020-10-08T12:45:20 | 2020-10-08T12:45:20 | 168,561,027 | 0 | 0 | null | 2020-09-16T12:38:46 | 2019-01-31T16:54:48 | Python | UTF-8 | Python | false | false | 753 | py | import matplotlib.pyplot as plt
from random_walk import RandomWalk
while True:
# Make a random walk, and plot the points.
rw = RandomWalk(500000)
rw.fill_walk()
plt.figure(figsize=(10, 6))
point_numbers = list(range(rw.num_points))
plt.scatter(rw.x_values, rw.y_values, c=point_numbers, cmap=plt.cm.Blues, edgecolor='none', s=1)
# Emphasize the first and last points.
plt.scatter(0, 0, c='green', edgecolors='none', s=100)
plt.scatter(rw.x_values[-1], rw.y_values[-1], c='red', edgecolors='none', s=100)
plt.axes().get_xaxis().set_visible(False)
plt.axes().get_yaxis().set_visible(False)
plt.show()
keep_running = input("Make another walk? (y/n): ")
if keep_running == 'n':
break
| [
"vos.kjell@gmail.com"
] | vos.kjell@gmail.com |
af9477decdec1d2bfb03a8a5787df8343ad1b196 | b8e9dd6fd8f8b691cba5a3af2388467bcf6c90bb | /samples/openapi3/client/petstore/python-experimental/petstore_api/model/integer_min15.py | e6dcb3af72d7dc36592959b07c480cccf1a37f3f | [
"Apache-2.0"
] | permissive | FallenRiteMonk/openapi-generator | f8b98940219eecf14dc76dced4b0fbd394522aa3 | b6576d11733ecad6fa4a0a616e1a06d502a771b7 | refs/heads/master | 2023-03-16T05:23:36.501909 | 2022-09-02T01:46:56 | 2022-09-02T01:46:56 | 164,609,299 | 0 | 0 | Apache-2.0 | 2019-01-08T09:08:56 | 2019-01-08T09:08:56 | null | UTF-8 | Python | false | false | 864 | py | # coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from datetime import date, datetime # noqa: F401
import decimal # noqa: F401
import functools # noqa: F401
import io # noqa: F401
import re # noqa: F401
import typing # noqa: F401
import uuid # noqa: F401
import frozendict # noqa: F401
from petstore_api import schemas # noqa: F401
class IntegerMin15(
schemas.Int64Schema
):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
class MetaOapg:
inclusive_minimum = 15
| [
"noreply@github.com"
] | FallenRiteMonk.noreply@github.com |
f7a92fcc0db6370db2bf509626dc91e8f3bf07f4 | de8336cbcaa51a5156346a0f4513adf2ebc29fd3 | /api/admin.py | d8a324370e1d999628566cabd4dbbb8476718b36 | [] | no_license | kranthi0987/djangosample | 8219b09a0d16591f274864b1fdc04ce46a31ce8a | bbae4ab38279d275353a2deb40ab9964fc6f7216 | refs/heads/master | 2020-08-23T16:23:37.896331 | 2019-10-27T15:23:40 | 2019-10-27T15:23:40 | 216,661,294 | 0 | 1 | null | 2020-07-21T05:33:21 | 2019-10-21T20:39:34 | Python | UTF-8 | Python | false | false | 159 | py | from django.contrib import admin
from .models import Songs, DummyData
# Register your models here.
admin.site.register(Songs)
admin.site.register(DummyData)
| [
"kranthi0987@gmail.com"
] | kranthi0987@gmail.com |
2c1ff6bd6165f7ddf080a6f5e104a08aecb7e88e | a4b9550d36b82b0ad5d24db8c75ab0d49a8a0904 | /Electrum/asgi.py | 8e4fdffa274eed668b4c2051a8150d7f27767792 | [] | no_license | jithinvijayan007/Electrum-Assignment- | e84323a82b152fc051a3e981e793c83e9cb174bf | eb82195ebcb48a19b97738c77a30fc3307ca0514 | refs/heads/master | 2023-01-11T07:44:47.029271 | 2020-11-18T13:24:44 | 2020-11-18T13:24:44 | 313,941,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | """
ASGI config for Electrum project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Electrum.settings')
application = get_asgi_application()
| [
"jithinvijayan007@gmail.com"
] | jithinvijayan007@gmail.com |
63b3332617f7b78be4ecf567876be04b1a66db94 | b018b734af4170d34d28c474f68777597dba29ec | /venv/bin/pyrsa-decrypt | 6297436bdc29e64724b2cb09382dc7feaf97b4c6 | [] | no_license | abdulkhan94/BigDataTechnology | ae0b7f8c03831f07b791bc5898c2bb18a4c3fec5 | 7be6d3a13e8fd42d9592d7287d694d507f9070b5 | refs/heads/master | 2023-02-13T04:07:49.070798 | 2021-01-11T01:34:51 | 2021-01-11T01:34:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | #!/Users/abdullahkhan/PycharmProjects/CloudKhan/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from rsa.cli import decrypt
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(decrypt())
| [
"abdullahn@gmail.com"
] | abdullahn@gmail.com | |
cfe2831a6522b71a9b9499c6a825bf3ae2606a14 | 7102ec163136e65c4da47658e669fde07521aaf1 | /app/setup.py | 2ac412c5412da5c4f619cae9a324bfdeedb2fd39 | [] | no_license | ghuntley/nomad-with-nix | 170d5ab571d9ae92fc1c420cd29ec94042e1c243 | bc5ff4afea2ed89074479c6ed2c39db1c577e062 | refs/heads/master | 2023-02-10T07:21:57.801694 | 2021-01-07T20:06:58 | 2021-01-07T20:07:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 317 | py | from setuptools import setup, find_packages
setup(
name="app",
py_modules=[
"main",
],
install_requires=[
"asyncpg",
"databases",
"fastapi",
"psycopg2",
"uvicorn",
],
entry_points={
"console_scripts": ["uvicorn=uvicorn:main"],
},
)
| [
"asko.soukka@iki.fi"
] | asko.soukka@iki.fi |
981a793fd86c41e5b30e11570db8718e3d216f27 | dd6ed4e1fa17ff9dd59116632964e2fad438bc83 | /eventframe/nodes/participant_list/forms.py | 7c00adf40e454b4c701b0bc744c66c54638c7838 | [] | no_license | elvisds/eventframe | 8ba3b6911ffad1d80b3c56eecf36d40c7ca3d1cc | 5a65c3671d1dea3967efdea4bf163f11bde39879 | refs/heads/master | 2021-01-15T18:04:59.943728 | 2013-08-15T18:11:39 | 2013-08-15T18:11:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 900 | py | # -*- coding: utf-8 -*-
import flask.ext.wtf as wtf
from eventframe.forms import DictField
from eventframe.nodes.content import ContentForm
__all__ = ['ParticipantListForm']
class ParticipantListForm(ContentForm):
source = wtf.SelectField(u"Data Source", choices=[
('', ''), ('doattend', 'DoAttend')],
description=u"Source from which the participant list will be retrieved.")
sourceid = wtf.TextField(u"Event id",
description=u"Id of this event at the selected data source.")
api_key = wtf.TextField(u"API Key",
description=u"API key to retrieve data from the selected data source.")
participant_template = wtf.TextField("Participant template",
validators=[wtf.Required()], default='participant.html',
description=u"Template with which a participant’s directory entry will be rendered.")
properties = DictField(u"Properties")
| [
"kiran@hasgeek.com"
] | kiran@hasgeek.com |
e527dc3bc132a68f454bd616f68298778961bf12 | 84b05857cbe74d190bdbee18d442d0c720b1b84d | /Coderbyte_algorithms/Hard/BracketCombinations/test_BracketCombinations.py | bea5f4e5ce46337ed54f65b42d3923344d653408 | [] | no_license | JakubKazimierski/PythonPortfolio | 1c8c7e7b0f1358fc42a2295b807d0afafd8e88a3 | 3aa62ad36c3b06b2a3b05f1f8e2a9e21d68b371f | refs/heads/master | 2023-06-01T01:16:22.897097 | 2023-05-15T01:05:22 | 2023-05-15T01:05:22 | 311,473,524 | 9 | 1 | null | null | null | null | UTF-8 | Python | false | false | 611 | py | '''
Unittests for BracketCombinations.py
January 2021 Jakub Kazimierski
'''
import unittest
import BracketCombinations
class test_BracketCombinations(unittest.TestCase):
'''
Class with unittests for BracketCombinations.py
'''
# region Unittests
def test_ExpectedOutput(self):
'''
Checks if returned output is as expected.
'''
output = BracketCombinations.BracketCombinations(3)
self.assertEqual(output, 5)
# endregion
if __name__ == "__main__":
'''
Main method for test cases.
'''
unittest.main() | [
"j.m.kazimierski@gmail.com"
] | j.m.kazimierski@gmail.com |
134c09442a208868e04335c154fd1be6a4f089c0 | a41e7ac731210a0cb9d198029962a086dc6b4311 | /python/helpers/pycharm/lettuce_runner.py | 2f64afc956d9bdca0e3a13d8bf65680fceb4428b | [] | no_license | gencer/intellij-community | 19e7d2eafd9da1e3ad9bddd4253a0cc91a1271e9 | dc9043c92d20ef479ea8c0a9114479c2cfd1f95f | refs/heads/master | 2020-12-13T21:53:16.304289 | 2014-11-03T10:23:07 | 2014-11-03T10:23:07 | 15,880,732 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,779 | py | # coding=utf-8
"""
BDD lettuce framework runner
TODO: Support other params (like tags) as well.
Supports only 2 params now: folder to search "features" for or file and "-s scenario_index"
"""
import argparse
import os
import _bdd_utils
__author__ = 'Ilya.Kazakevich'
from lettuce.exceptions import ReasonToFail
import lettuce
from lettuce import core
class _LettuceRunner(_bdd_utils.BddRunner):
"""
Lettuce runner (BddRunner for lettuce)
"""
def __init__(self, base_dir, what_to_run, scenarios):
"""
:param scenarios scenario numbers to run
:type scenarios list
:param base_dir base directory to run tests in
:type base_dir: str
:param what_to_run folder or file to run
:type what_to_run str
"""
super(_LettuceRunner, self).__init__(base_dir)
self.__runner = lettuce.Runner(what_to_run, ",".join(scenarios))
def _get_features_to_run(self):
super(_LettuceRunner, self)._get_features_to_run()
features = []
if self.__runner.single_feature: # We need to run one and only one feature
features = [core.Feature.from_file(self.__runner.single_feature)]
else:
# Find all features in dir
for feature_file in self.__runner.loader.find_feature_files():
feature = core.Feature.from_file(feature_file)
assert isinstance(feature, core.Feature), feature
# TODO: cut out due to https://github.com/gabrielfalcao/lettuce/issues/451 Fix when this issue fixed
feature.scenarios = filter(lambda s: not s.outlines, feature.scenarios)
if feature.scenarios:
features.append(feature)
# Choose only selected scenarios
if self.__runner.scenarios:
for feature in features:
filtered_feature_scenarios = []
for index in [i - 1 for i in self.__runner.scenarios]: # decrease index by 1
if index < len(feature.scenarios):
filtered_feature_scenarios.append(feature.scenarios[index])
feature.scenarios = filtered_feature_scenarios
return features
def _run_tests(self):
super(_LettuceRunner, self)._run_tests()
self.__install_hooks()
self.__runner.run()
def __step(self, is_started, step):
"""
Reports step start / stop
:type step core.Step
:param step: step
"""
test_name = step.sentence
if is_started:
self._test_started(test_name, step.described_at)
elif step.passed:
self._test_passed(test_name)
elif step.failed:
reason = step.why
assert isinstance(reason, ReasonToFail), reason
self._test_failed(test_name, message=reason.exception, details=reason.traceback)
elif step.has_definition:
self._test_skipped(test_name, "In lettuce, we do know the reason", step.described_at)
else:
self._test_undefined(test_name, step.described_at)
def __install_hooks(self):
"""
Installs required hooks
"""
# Install hooks
lettuce.before.each_feature(
lambda f: self._feature_or_scenario(True, f.name, f.described_at))
lettuce.after.each_feature(
lambda f: self._feature_or_scenario(False, f.name, f.described_at))
lettuce.before.each_scenario(
lambda s: self.__scenario(True, s))
lettuce.after.each_scenario(
lambda s: self.__scenario(False, s))
lettuce.before.each_background(
lambda b, *args: self._background(True, b.feature.described_at))
lettuce.after.each_background(
lambda b, *args: self._background(False, b.feature.described_at))
lettuce.before.each_step(lambda s: self.__step(True, s))
lettuce.after.each_step(lambda s: self.__step(False, s))
def __scenario(self, is_started, scenario):
"""
Reports scenario launched
:type scenario core.Scenario
:param scenario: scenario
"""
if scenario.outlines:
scenario.steps = [] # Clear to prevent running. TODO: Fix when this issue fixed
scenario.background = None # TODO: undocumented
return
self._feature_or_scenario(is_started, scenario.name, scenario.described_at)
if __name__ == "__main__":
(base_dir, scenarios, what_to_run) = _bdd_utils.get_what_to_run_by_env(os.environ)
if len(what_to_run) > 1:
raise Exception("Lettuce can't run more than one file now")
_bdd_utils.fix_win_drive(what_to_run[0])
_LettuceRunner(base_dir, what_to_run[0], scenarios).run() | [
"Ilya.Kazakevich@jetbrains.com"
] | Ilya.Kazakevich@jetbrains.com |
3afd486fd50e5acb41db7c6f19412760f26eb79e | facb8b9155a569b09ba66aefc22564a5bf9cd319 | /wp2/merra_scripts/01_netCDF_extraction/merra902Combine/757-tideGauge.py | b5cc5d83438152989fa3ddf082f55d7b833dd4b9 | [] | no_license | moinabyssinia/modeling-global-storm-surges | 13e69faa8f45a1244a964c5de4e2a5a6c95b2128 | 6e385b2a5f0867df8ceabd155e17ba876779c1bd | refs/heads/master | 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,376 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 17 11:28:00 2020
--------------------------------------------
Load predictors for each TG and combine them
--------------------------------------------
@author: Michael Tadesse
"""
import os
import pandas as pd
#define directories
# dir_name = 'F:\\01_erainterim\\01_eraint_predictors\\eraint_D3'
dir_in = "/lustre/fs0/home/mtadesse/merraLocalized"
dir_out = "/lustre/fs0/home/mtadesse/merraAllCombined"
def combine():
os.chdir(dir_in)
#get names
tg_list_name = os.listdir()
x = 757
y = 758
for tg in range(x, y):
os.chdir(dir_in)
tg_name = tg_list_name[tg]
print(tg_name, '\n')
#looping through each TG folder
os.chdir(tg_name)
#check for empty folders
if len(os.listdir()) == 0:
continue
#defining the path for each predictor
where = os.getcwd()
csv_path = {'slp' : os.path.join(where, 'slp.csv'),\
"wnd_u": os.path.join(where, 'wnd_u.csv'),\
'wnd_v' : os.path.join(where, 'wnd_v.csv')}
first = True
for pr in csv_path.keys():
print(tg_name, ' ', pr)
#read predictor
pred = pd.read_csv(csv_path[pr])
#remove unwanted columns
pred.drop(['Unnamed: 0'], axis = 1, inplace=True)
#sort based on date as merra files are scrambled
pred.sort_values(by = 'date', inplace=True)
#give predictor columns a name
pred_col = list(pred.columns)
for pp in range(len(pred_col)):
if pred_col[pp] == 'date':
continue
pred_col[pp] = pr + str(pred_col[pp])
pred.columns = pred_col
#merge all predictors
if first:
pred_combined = pred
first = False
else:
pred_combined = pd.merge(pred_combined, pred, on = 'date')
#saving pred_combined
os.chdir(dir_out)
tg_name = str(tg)+"_"+tg_name;
pred_combined.to_csv('.'.join([tg_name, 'csv']))
os.chdir(dir_in)
print('\n')
#run script
combine()
| [
"michaelg.tadesse@gmail.com"
] | michaelg.tadesse@gmail.com |
3481718d73cd06e14af89d79412405e9ae54e588 | aa5a8163c450a7ca7f4d3e7483213bb3642e0824 | /map/google/main.py | 6a8dbbb4d4e21e23144734bc0d11b43403bfd5b9 | [
"Apache-2.0"
] | permissive | kosyachniy/dev | e068bf3c9ad4d3808c70e5eb2afdafc2ef796482 | 41f58e72a397c7ff0df26dfa3e19dc64c8eff1d2 | refs/heads/main | 2023-06-25T10:36:56.333731 | 2023-06-15T01:26:19 | 2023-06-15T01:26:19 | 89,030,562 | 21 | 4 | Apache-2.0 | 2023-06-02T20:19:17 | 2017-04-21T23:13:54 | HTML | UTF-8 | Python | false | false | 1,535 | py | import json
from datetime import datetime
import googlemaps
with open('keys.json', 'r') as file:
KEY = json.loads(file.read())['google']['maps']['key']
gmaps = googlemaps.Client(key=KEY)
# Координаты по адресу
address = '1600 Amphitheatre Parkway, Mountain View, CA'
coords = gmaps.geocode(address)
print(coords)
# Адрес по координатам
coords = (40.714224, -73.961452)
address = gmaps.reverse_geocode(coords)
print(address)
# Маршрут
now = datetime.now()
directions_result = gmaps.directions(
'Sydney Town Hall',
'Parramatta, NSW',
mode="transit",
departure_time=now,
)
print(directions_result)
# Матрица расстояний
origins = [
'Perth, Australia',
'Sydney, Australia',
'Melbourne, Australia',
'Adelaide, Australia',
]
destinations = [
'Blue Mountains, Australia',
'Bungle Bungles, Australia',
'The Pinnacles, Australia',
]
matrix = gmaps.distance_matrix(origins, destinations)
print(matrix)
# Место в радиусе
# place = gmaps.find_place(
# 'Restaurant',
# 'textquery',
# fields=[
# 'place_id',
# 'geometry/location',
# 'name',
# 'formatted_address',
# 'photos',
# 'price_level',
# 'rating',
# 'types',
# ],
# location_bias='circle:0.5@47.390325,8.515934',
# )
# print(place)
geo = {
'lat': 47.390325,
'lng': 8.515934,
}
radius = 1000 # в метрах
category = 'Food'
places = gmaps.places_nearby(
location=(geo['lat'], geo['lng']),
radius=radius,
keyword = category,
)['results']
print(places) | [
"polozhev@mail.ru"
] | polozhev@mail.ru |
7ecaf8a263fb938f42a3db5c6b5a11abb11136b8 | 3a9b154aa9d5e379683476f80f30630bf44d2102 | /Server_v1/amazon/migrations/0014_auto_20190805_1705.py | e5c8d37fc9418b6c6303cd36e306d47c4f28f273 | [] | no_license | KevinDon/py_amazon_analysis | 81995e360d2b536e1df6e515aae9457054edae29 | 13b5fbb046ca6516ac3a47e8f7867baf358011f4 | refs/heads/master | 2022-12-13T00:27:27.511783 | 2019-08-14T11:45:53 | 2019-08-14T11:45:53 | 185,160,162 | 0 | 1 | null | 2022-12-10T05:38:15 | 2019-05-06T08:56:40 | TSQL | UTF-8 | Python | false | false | 1,255 | py | # Generated by Django 2.2 on 2019-08-05 09:05
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('amazon', '0013_auto_20190802_1106'),
]
operations = [
migrations.CreateModel(
name='AmazonProductCategoryKeywordRelation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('amazon_category_id', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='amazon.AmazonProductCategoryModel', verbose_name='Amazon Category')),
('amazon_keyword_id', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='amazon.SkuKeywordModel', verbose_name='Amazon Keyword')),
],
options={
'db_table': 'amazon_product_category_keyword_relation',
},
),
migrations.AddField(
model_name='amazonproductcategorymodel',
name='keyword',
field=models.ManyToManyField(through='amazon.AmazonProductCategoryKeywordRelation', to='amazon.SkuKeywordModel', verbose_name='Keyword'),
),
]
| [
"kevintang002@gmail.com"
] | kevintang002@gmail.com |
fc217223fd2c75a5608bbcb6e46372fa4b31ff90 | 9ef35f89227d474f3664d27dbe3ba63bd52f8422 | /toripscanner/toripscanner.py | 8f515aaa5a0e7c397fec030a71b223c72779dbb7 | [] | no_license | oftc/toripscanner | 9ec7e4feac39172247b678a7d85fdca83f73fde0 | f0365bd3d25b49e786ceac010796084e431ad2ec | refs/heads/master | 2023-07-02T08:49:44.490859 | 2021-08-15T03:13:22 | 2021-08-15T03:13:22 | 390,759,119 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,201 | py | import logging
from argparse import ArgumentParser
from typing import Dict, Any
import toripscanner.cmd.scan
import toripscanner.cmd.parse
from . import __version__
from .config import get_config, config_logging
log = logging.getLogger(__name__)
def create_parser():
p = ArgumentParser()
p.add_argument('--version', action='version', version=__version__)
p.add_argument('-c', '--config', help='Path to toripscanner config file')
# p.add_argument(
# '-d', '--datadir', help='If provided, overwrite the coord/worker '
# 'datadir config file option with this')
# p.add_argument('--log-level',
# choices=['debug', 'info', 'warning', 'error', 'critical'],
# help='Override the configured toripscanner log level')
sub = p.add_subparsers(dest='cmd')
toripscanner.cmd.scan.gen_parser(sub)
toripscanner.cmd.parse.gen_parser(sub)
return p
def overwrite_conf(args, conf) -> None:
''' Some arguments will overwrite configuration values. Do that. '''
pass
# if args.datadir:
# assert args.cmd
# old = conf[args.cmd]['datadir']
# log.debug(
# f'Changing {args.cmd}.datadir from {old} to {args.datadir}')
# conf[args.cmd]['datadir'] = args.datadir
# This function needs **some sort** of type annotation so that mypy will check
# the things it does. Adding the return value (e.g. '-> None') is enough
def call_real_main(args, conf) -> None:
''' Figure out what command the user gave and call into that
command's main function where the real work begins to happen. The only
logic here should be figuring out what command's main to call. '''
# Most (actually, all as of right now) command's main functions take these
# arguments
def_args = [args, conf]
def_kwargs: Dict[str, Any] = {}
# How to call in to each command's main
cmds = {
'scan': {
'f': toripscanner.cmd.scan.main,
'a': def_args, 'kw': def_kwargs,
},
'parse': {
'f': toripscanner.cmd.parse.main,
'a': def_args, 'kw': def_kwargs,
},
}
# The keys in the `cmds` dict must be the same as each command specified in
# its gen_parser(...) function, thus it will be in `cmds`. args.cmd will
# also be non-None because our caller must have checked that already.
assert args.cmd in cmds
# Here we go!
cmd = cmds[args.cmd]
return cmd['f'](*cmd['a'], *cmd['kw']) # type: ignore
def main() -> None:
''' Entry point when called on the command line as `toripscanner ...`.
Do boring boilerplate stuff to get started initially. Parse the command
line arguments and configuration file, then hand off control. This is where
the bulk of the startup boring crap should happen. '''
p = create_parser()
args = p.parse_args()
if args.cmd is None:
p.print_help()
return
try:
conf = get_config(args.config)
except FileNotFoundError as e:
log.critical('Unable to open a config file: %s', e)
return
assert conf
config_logging(conf)
overwrite_conf(args, conf)
call_real_main(args, conf)
| [
"sirmatt@ksu.edu"
] | sirmatt@ksu.edu |
b4c37d818e3bcccfd45e9c2b5690f7cbafa56c01 | a0664f0d3950a7147f84a317b2e417e0433a4ae4 | /test/test_invoice_billing_address.py | 431b1384493ea8ad7498fc318d0b7f8ca6793de5 | [] | no_license | reepay/reepay-checkout-python | 1e748893a970d28c6b0242bc7d26aa21325abb32 | 8bbd36219335a1fc65f857ac537ff4931bc6e5c7 | refs/heads/master | 2020-10-01T20:10:29.660872 | 2019-12-12T13:39:46 | 2019-12-12T13:39:46 | 227,615,447 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 918 | py | # coding: utf-8
"""
Reepay Checkout API
Reepay Checkout REST API # noqa: E501
OpenAPI spec version: 1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.invoice_billing_address import InvoiceBillingAddress # noqa: E501
from swagger_client.rest import ApiException
class TestInvoiceBillingAddress(unittest.TestCase):
"""InvoiceBillingAddress unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testInvoiceBillingAddress(self):
"""Test InvoiceBillingAddress"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.invoice_billing_address.InvoiceBillingAddress() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"robert@reepay.com"
] | robert@reepay.com |
a3da31f13b5f6cd102d61aee6848e0d8c48d0510 | d01c2f82838e246076b0fd3514bc21119a4ee792 | /guillotina/cookiecutter/application/{{cookiecutter.package_name}}/{{cookiecutter.package_name}}/__init__.py | 18d4c2bcbdcb48e447eb79bd02cc35918d93172d | [
"BSD-2-Clause"
] | permissive | jordic/guillotina | 0940a935264aee189daf4a21b50f0efc1650ffbd | 8845454a784e797b90951580e1ab5fa9172055e1 | refs/heads/master | 2020-03-07T15:37:30.139620 | 2018-06-22T16:37:40 | 2018-06-22T16:37:40 | 127,559,927 | 0 | 0 | BSD-2-Clause | 2018-03-31T18:30:34 | 2018-03-31T18:30:33 | null | UTF-8 | Python | false | false | 302 | py | from guillotina import configure
app_settings = {
# provide custom application settings here...
}
def includeme(root):
"""
custom application initialization here
"""
configure.scan('{{cookiecutter.package_name}}.api')
configure.scan('{{cookiecutter.package_name}}.install')
| [
"vangheem@gmail.com"
] | vangheem@gmail.com |
a79abc6d3e3ba8d2fd884627dc5bfe8c07440687 | 7c79c8caee77d08aa05cdc59eb68e569abf54a7e | /ics 32/ics 32 larc/animal_class.py | d80f309fccc67c06da02e0f1e1c7808aca2fe846 | [] | no_license | solomc1/python | 2e4715cc24e7b23d91c879fc95954f615a615982 | 119e388fb6f4ab42f581e48393919d4052a08ef6 | refs/heads/master | 2021-01-17T16:48:02.671810 | 2016-07-29T05:27:50 | 2016-07-29T05:27:50 | 64,452,881 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 628 | py | #Create 4 animal classes that have functions growl and eat
#Ask a user which animal they want and make it growl and eat
class Zebra:
def growl(self)->str:
print("I'm eating grass...")
class Giraffe:
def growl(self)->str:
print("I'm eating cupcakes...")
class Moose:
def growl(self)->str:
print("I'm eating pho")
class Tiger:
def growl(self)->str:
print("I'm eating you...")
u_i = input("Choose an animal: ").lower()
if u_i == 'z':
animal = Zebra()
elif u_i == 'g':
animal = Giraffe()
elif u_i == 'm':
Moose()
elif u_i == 't':
Tiger()
animal.growl()
| [
"solomc1@uci.edu"
] | solomc1@uci.edu |
52f46ec16cb48b6b7c28fce60dffa9fca6a97c14 | 2899430814db2d06f8f5e105e02c731047013f93 | /backend/users/migrations/0002_auto_20200828_1219.py | 5e24b577abdd99c6db1761c8b96e6c31990cae83 | [] | no_license | crowdbotics-apps/project-1-19840 | 8de5a8e8f07aa35a7445c5943613766b793b0790 | 2f9c890ba6bc7d734f474a5141ad9d9018725761 | refs/heads/master | 2022-12-04T01:51:43.336555 | 2020-08-28T12:19:53 | 2020-08-28T12:19:53 | 291,038,994 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 627 | py | # Generated by Django 2.2.15 on 2020-08-28 12:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('course', '0001_initial'),
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='group',
field=models.ManyToManyField(blank=True, related_name='user_group', to='course.Group'),
),
migrations.AlterField(
model_name='user',
name='name',
field=models.CharField(blank=True, max_length=255, null=True),
),
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
8e8764ab12c5d3b41e59f47cd5a9fbb3c7dd5edc | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02618/s903697509.py | c5447f9b8ecd4c99caea4697471af46080b6fba0 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,955 | py | from random import randint, random, seed
from math import exp
import sys
input = sys.stdin.buffer.readline
INF = 9223372036854775808
def calc_score(D, C, S, T):
"""
開催日程Tを受け取ってそこまでのスコアを返す
コンテストi 0-indexed
d 0-indexed
"""
score = 0
last = [0]*26 # コンテストiを前回開催した日
for d, t in enumerate(T):
last[t] = d + 1
for i in range(26):
score -= (d + 1 - last[i]) * C[i]
score += S[d][t]
return score
def update_score(D, C, S, T, score, ct, ci):
"""
ct日目のコンテストをコンテストciに変更する
スコアを差分更新する
ct: change t 変更日 0-indexed
ci: change i 変更コンテスト 0-indexed
"""
new_score = score
last = [0]*26 # コンテストiを前回開催した日
prei = T[ct] # 変更前に開催する予定だったコンテストi
for d, t in enumerate(T, start=1):
last[t] = d
new_score += (d - last[prei])*C[prei]
new_score += (d - last[ci])*C[ci]
last = [0]*26
for d, t in enumerate(T, start=1):
if d-1 == ct:
last[ci] = d
else:
last[t] = d
new_score -= (d - last[prei])*C[prei]
new_score -= (d - last[ci])*C[ci]
new_score -= S[ct][prei]
new_score += S[ct][ci]
return new_score
def update_swap_score(D, C, S, T, score, ct1, ct2):
"""
ct1日目のコンテストとct2日目のコンテストを入れ替える
スコアを差分更新する
"""
new_score = score
new_T = T.copy()
last = [0]*26 # コンテストiを前回開催した日
ci1 = new_T[ct1]
ci2 = new_T[ct2]
for d, t in enumerate(new_T, start=1):
last[t] = d
new_score += (d - last[ci1])*C[ci1]
new_score += (d - last[ci2])*C[ci2]
last = [0]*26
new_T[ct1], new_T[ct2] = new_T[ct2], new_T[ct1]
for d, t in enumerate(new_T, start=1):
last[t] = d
new_score -= (d - last[ci1])*C[ci1]
new_score -= (d - last[ci2])*C[ci2]
return new_score
def evaluate(D, C, S, T, k):
"""
d日目終了時点での満足度を計算し,
d + k日目終了時点での満足度の減少も考慮する
"""
score = 0
last = [0]*26
for d, t in enumerate(T):
last[t] = d + 1
for i in range(26):
score -= (d + 1 - last[i]) * C[i]
score += S[d][t]
for d in range(len(T), min(len(T) + k, D)):
for i in range(26):
score -= (d + 1 - last[i]) * C[i]
return score
def greedy(D, C, S):
Ts = []
for k in range(5, 13):
T = [] # 0-indexed
max_score = -INF
for d in range(D):
# d+k日目終了時点で満足度が一番高くなるようなコンテストiを開催する
max_score = -INF
best_i = 0
for i in range(26):
T.append(i)
score = evaluate(D, C, S, T, k)
if max_score < score:
max_score = score
best_i = i
T.pop()
T.append(best_i)
Ts.append((max_score, T))
return max(Ts, key=lambda pair: pair[0])
def local_search(D, C, S, score, T):
    """Improve schedule ``T`` by simulated annealing; return the best found.

    D: number of days, C: dissatisfaction coefficients, S: satisfaction
    table, score: score of the initial schedule, T: initial schedule
    (0-indexed contest per day; mutated in place during the search).
    """
    # sTime = time()
    # Annealing temperature: geometric interpolation from T0 down to T1.
    T0 = 2122.982998363944
    T1 = 1
    # TL = 1.8
    Temp = T0
    # cnt = 0
    t = 0
    best_score = score
    best_T = T.copy()
    for cnt in range(48000):
        if cnt % 200 == 0:
            # NOTE(review): divides by 160000-1 although the loop only runs
            # 48000 iterations, so only the hot end of the T0->T1 schedule is
            # ever reached -- confirm this is intended.
            t = cnt / (160000 - 1)
            Temp = pow(T0, 1-t) * pow(T1, t)
        sel = randint(1, 100)  # 99%: single-day change, 1%: two-day swap
        lim = random()  # acceptance threshold for the Metropolis criterion
        if sel != 1:
            # Change the contest held on day ct to contest ci.
            ct = randint(0, D-1)
            ci = randint(0, 25)
            new_score = update_score(D, C, S, T, score, ct, ci)
            if score < new_score or \
                    (lim < exp((new_score - score)/Temp)):
                T[ct] = ci
                score = new_score
        else:
            # Swap the contests held on day ct1 and day ct2.
            ct1 = randint(0, D-1)
            ct2 = randint(0, D-1)
            ci1 = T[ct1]
            ci2 = T[ct2]
            # NOTE(review): both deltas are computed against the unmodified T,
            # so the combined delta is only approximate when the two changes
            # interact -- verify against a full rescore.
            new_score = update_score(D, C, S, T, score, ct1, ci2)
            new_score = update_score(D, C, S, T, new_score, ct2, ci1)
            if score < new_score or \
                    (lim < exp((new_score - score)/Temp)):
                score = new_score
                T[ct1] = ci2
                T[ct2] = ci1
        # Track the best schedule seen so far, not just the current one.
        if best_score < score:
            best_score = score
            best_T = T.copy()
    # cnt += 1
    return best_T
if __name__ == '__main__':
    # Fix the RNG so the annealing run is reproducible.
    seed(1)
    D = int(input())  # number of days
    C = [int(i) for i in input().split()]  # dissatisfaction coefficients c_1..c_26
    S = [[int(i) for i in input().split()] for j in range(D)]  # satisfaction s[d][i]
    init_score, T = greedy(D, C, S)  # greedy initial schedule and its score
    T = local_search(D, C, S, init_score, T)  # refine by simulated annealing
    for t in T:
        print(t+1)  # output contest numbers 1-indexed
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
676f959d3e7c34fdaac7aeaf0ceb46cc93f51d71 | 0e0d6e6332e8d783b1f1da961f496a647f88eb77 | /ryu/controller/dp_type.py | dc9704c0ea346117bf2ae99a2ebe0712778d4d34 | [
"Apache-2.0"
] | permissive | unifycore/ryu | 667a957c42a2e3c23780e52d3d72457fbd7487c8 | 3cdfd5957e5cc20a90dd6bc0070f1795f31afc44 | refs/heads/master | 2021-01-21T19:54:14.558501 | 2018-02-09T22:15:06 | 2018-02-09T22:15:06 | 14,217,663 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 955 | py | # Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2012 Isaku Yamahata <yamahata at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# datapath type
# At this moment, this information is not used yet and unknown type is
# defined as place-holder.
# switches are categorized by its rolls and openflow controller may want to
# handle switch differently depending on it role.
#
# unknown:
#
UNKNOWN = 'UNKNOWN'
| [
"fujita.tomonori@lab.ntt.co.jp"
] | fujita.tomonori@lab.ntt.co.jp |
dcc02ccfc560b15d59d43056ae81ae14687d84c7 | 9b20743ec6cd28d749a4323dcbadb1a0cffb281b | /11_Time_Series_Forecasting_with_Python/05/airline_boxcox_auto.py | 46d48d9fe18da8804d0ee8aaf3433f7d3b894dff | [] | no_license | jggrimesdc-zz/MachineLearningExercises | 6e1c7e1f95399e69bba95cdfe17c4f8d8c90d178 | ee265f1c6029c91daff172b3e7c1a96177646bc5 | refs/heads/master | 2023-03-07T19:30:26.691659 | 2021-02-19T08:00:49 | 2021-02-19T08:00:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 596 | py | # automatically box-cox transform a time series
from matplotlib import pyplot
from pandas import DataFrame
from pandas import read_csv
from scipy.stats import boxcox
# Load the monthly airline-passengers series (first column as the index).
series = read_csv('airline-passengers.csv', header=0, index_col=0, parse_dates=True, squeeze=True)
dataframe = DataFrame(series.values)
dataframe.columns = ['passengers']
# Box-Cox transform the series; lam is the automatically fitted lambda.
dataframe['passengers'], lam = boxcox(dataframe['passengers'])
print('Lambda: %f' % lam)
pyplot.figure(1)
# line plot
pyplot.subplot(211)
pyplot.plot(dataframe['passengers'])
# histogram
pyplot.subplot(212)
pyplot.hist(dataframe['passengers'])
pyplot.show()
| [
"jgrimes@jgrimes.tech"
] | jgrimes@jgrimes.tech |
3fdc94789dbc47b5910241a83c5ad57671bc9e35 | 03dea3c0db7c8fafda71d23c3c2595f563ffb335 | /SignalMC/test/AMSB_chargino900GeV_ctau10cm_step1.py | b612a64808e5b716cb361082e95b61d933ed4b7b | [] | no_license | Andersen98/DisappTrks | 3952e9bf8ba270e2d88aa2e8d9ef805cf25dfc46 | 140a5efdc4c51a30e5fced6d34b7813876c2f2ee | refs/heads/master | 2020-06-27T03:41:59.136790 | 2017-07-12T15:19:18 | 2017-07-12T15:19:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,070 | py | # Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: DisappTrks/SignalMC/python/AMSB_chargino900GeV_ctau10cm_NoFilter_13TeV.py --fileout file:AMSB_chargino700GeV_ctau10cm_step1.root --mc --eventcontent RAWSIM --customise SLHCUpgradeSimulations/Configuration/postLS1Customs.customisePostLS1,Configuration/DataProcessing/Utils.addMonitoring,SimG4Core/CustomPhysics/Exotica_HSCP_SIM_cfi,DisappTrks/SignalMC/genParticlePlusGeant.customizeProduce,DisappTrks/SignalMC/genParticlePlusGeant.customizeKeep --datatier GEN-SIM --conditions MCRUN2_71_V1::All --beamspot Realistic50ns13TeVCollision --step GEN,SIM --magField 38T_PostLS1 --python_filename AMSB_chargino900GeV_ctau10cm_step1.py --no_exec -n 46
import FWCore.ParameterSet.Config as cms
process = cms.Process('SIM')
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.Geometry.GeometrySimDB_cff')
process.load('Configuration.StandardSequences.MagneticField_38T_PostLS1_cff')
process.load('Configuration.StandardSequences.Generator_cff')
process.load('IOMC.EventVertexGenerators.VtxSmearedRealistic50ns13TeVCollision_cfi')
process.load('GeneratorInterface.Core.genFilterSummary_cff')
process.load('Configuration.StandardSequences.SimIdeal_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(46)
)
# Input source
process.source = cms.Source("EmptySource")
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('$Revision: 1.19 $'),
annotation = cms.untracked.string('DisappTrks/SignalMC/python/AMSB_chargino900GeV_ctau10cm_NoFilter_13TeV.py nevts:46'),
name = cms.untracked.string('Applications')
)
# Output definition
process.RAWSIMoutput = cms.OutputModule("PoolOutputModule",
splitLevel = cms.untracked.int32(0),
eventAutoFlushCompressedSize = cms.untracked.int32(5242880),
outputCommands = process.RAWSIMEventContent.outputCommands,
fileName = cms.untracked.string('file:AMSB_chargino700GeV_ctau10cm_step1.root'),
dataset = cms.untracked.PSet(
filterName = cms.untracked.string(''),
dataTier = cms.untracked.string('GEN-SIM')
),
SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('generation_step')
)
)
# Additional output definition
# Other statements
process.genstepfilter.triggerConditions=cms.vstring("generation_step")
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'MCRUN2_71_V1::All', '')
process.generator = cms.EDFilter("Pythia6GeneratorFilter",
maxEventsToPrint = cms.untracked.int32(1),
pythiaPylistVerbosity = cms.untracked.int32(3),
comEnergy = cms.double(13000.0),
particleFile = cms.untracked.string('DisappTrks/SignalMC/data/geant4_AMSB_chargino_900GeV_ctau10cm.slha'),
filterEfficiency = cms.untracked.double(1.0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
processFile = cms.untracked.string('SimG4Core/CustomPhysics/data/RhadronProcessList.txt'),
useregge = cms.bool(False),
slhaFile = cms.untracked.string('DisappTrks/SignalMC/data/AMSB_chargino_900GeV_Isajet780.slha'),
massPoint = cms.untracked.int32(900),
hscpFlavor = cms.untracked.string('stau'),
PythiaParameters = cms.PSet(
pythiaUESettings = cms.vstring('MSTU(21)=1 ! Check on possible errors during program execution',
'MSTJ(22)=2 ! Decay those unstable particles',
'PARJ(71)=10 . ! for which ctau 10 mm',
'MSTP(33)=0 ! no K factors in hard cross sections',
'MSTP(2)=1 ! which order running alphaS',
'MSTP(51)=10042 ! structure function chosen (external PDF CTEQ6L1)',
'MSTP(52)=2 ! work with LHAPDF',
'PARP(82)=1.921 ! pt cutoff for multiparton interactions',
'PARP(89)=1800. ! sqrts for which PARP82 is set',
'PARP(90)=0.227 ! Multiple interactions: rescaling power',
'MSTP(95)=6 ! CR (color reconnection parameters)',
'PARP(77)=1.016 ! CR',
'PARP(78)=0.538 ! CR',
'PARP(80)=0.1 ! Prob. colored parton from BBR',
'PARP(83)=0.356 ! Multiple interactions: matter distribution parameter',
'PARP(84)=0.651 ! Multiple interactions: matter distribution parameter',
'PARP(62)=1.025 ! ISR cutoff',
'MSTP(91)=1 ! Gaussian primordial kT',
'PARP(93)=10.0 ! primordial kT-max',
'MSTP(81)=21 ! multiple parton interactions 1 is Pythia default',
'MSTP(82)=4 ! Defines the multi-parton model'),
processParameters = cms.vstring('IMSS(1) = 11 ! Spectrum from external SLHA file',
'IMSS(21) = 33 ! LUN number for SLHA File (must be 33) ',
'IMSS(22) = 33 ! Read-in SLHA decay table ',
'MSEL = 0 ! General SUSY',
'MSUB(226) = 1 ! to double chargino',
'MSUB(229) = 1 ! to neutralino + chargino',
'MDCY(312,1) = 0 ! set the chargino stable.'),
parameterSets = cms.vstring('pythiaUESettings',
'processParameters',
'SLHAParameters'),
SLHAParameters = cms.vstring('SLHAFILE = DisappTrks/SignalMC/data/AMSB_chargino_900GeV_Isajet780.slha')
)
)
# Path and EndPath definitions
process.generation_step = cms.Path(process.pgen)
process.simulation_step = cms.Path(process.psim)
process.genfiltersummary_step = cms.EndPath(process.genFilterSummary)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.RAWSIMoutput_step = cms.EndPath(process.RAWSIMoutput)
# Schedule definition
process.schedule = cms.Schedule(process.generation_step,process.genfiltersummary_step,process.simulation_step,process.endjob_step,process.RAWSIMoutput_step)
# filter all path with the production filter sequence
for path in process.paths:
getattr(process,path)._seq = process.generator * getattr(process,path)._seq
# customisation of the process.
# Automatic addition of the customisation function from DisappTrks.SignalMC.genParticlePlusGeant
from DisappTrks.SignalMC.genParticlePlusGeant import customizeProduce,customizeKeep
#call to customisation function customizeProduce imported from DisappTrks.SignalMC.genParticlePlusGeant
process = customizeProduce(process)
#call to customisation function customizeKeep imported from DisappTrks.SignalMC.genParticlePlusGeant
process = customizeKeep(process)
# Automatic addition of the customisation function from Configuration.DataProcessing.Utils
from Configuration.DataProcessing.Utils import addMonitoring
#call to customisation function addMonitoring imported from Configuration.DataProcessing.Utils
process = addMonitoring(process)
# Automatic addition of the customisation function from SLHCUpgradeSimulations.Configuration.postLS1Customs
from SLHCUpgradeSimulations.Configuration.postLS1Customs import customisePostLS1
#call to customisation function customisePostLS1 imported from SLHCUpgradeSimulations.Configuration.postLS1Customs
process = customisePostLS1(process)
# Automatic addition of the customisation function from SimG4Core.CustomPhysics.Exotica_HSCP_SIM_cfi
from SimG4Core.CustomPhysics.Exotica_HSCP_SIM_cfi import customise
#call to customisation function customise imported from SimG4Core.CustomPhysics.Exotica_HSCP_SIM_cfi
process = customise(process)
# End of customisation functions
| [
"ahart@cern.ch"
] | ahart@cern.ch |
47fc3d5c03c500f55864283af007e7e341651e9e | 50a690ab7db8fe98a620f3c54aabd90c3ff3e7f3 | /losses/triplet_loss_test.py | 6348bc6af5eac3386abfe5d64a0b4bfb51155046 | [] | no_license | yekeren/ADVISE-Image_ads_understanding | 590754909d2f4259a57d32591a15bea845586a0f | 2ea5e1405b1ab178b95f9c2cd9158b16847ac6a3 | refs/heads/master | 2021-10-02T08:01:29.193553 | 2018-11-29T16:32:25 | 2018-11-29T16:32:25 | 103,291,233 | 22 | 8 | null | null | null | null | UTF-8 | Python | false | false | 5,369 | py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from losses import triplet_loss
slim = tf.contrib.slim
class TripletLossTest(tf.test.TestCase):
  """Tests for the triplet example-mining helpers in losses.triplet_loss.

  Fixed for Python 3: the Python-2-only builtin ``xrange`` (a NameError
  under Python 3, despite the ``__future__`` imports at the top of this
  file) is replaced with ``range``.
  """
  def setUp(self):
    tf.logging.set_verbosity(tf.logging.INFO)
  def test1(self):
    # Smoke test: build a [4, 10] one-hot boolean mask from label indices.
    g = tf.Graph()
    with g.as_default():
      labels = tf.placeholder(shape=[4], dtype=tf.int32)
      positive_masks = tf.sparse_to_dense(
          tf.stack([tf.range(4), labels], 1),
          output_shape=tf.stack([4, 10]),
          sparse_values=True,
          default_value=False,
          validate_indices=True)
    with self.test_session(graph=g) as sess:
      mat = sess.run([positive_masks], feed_dict={
        labels: np.array([0, 1, 2, 3])})
      print(mat)
  def test_mine_random_examples(self):
    # 4x4 batch with 4 samples each -> 16 pairs, pos and neg always differ.
    g = tf.Graph()
    with g.as_default():
      distances = tf.placeholder(shape=[None, None], dtype=tf.float32)
      pos_indices, neg_indices = triplet_loss._mine_random_examples(distances, 4)
    with self.test_session(graph=g) as sess:
      pos, neg = sess.run([pos_indices, neg_indices],
          feed_dict={distances: np.ones([4, 4])})
      self.assertEqual(pos.shape, (16,))
      self.assertEqual(neg.shape, (16,))
      for i in range(16):  # was: xrange(16) -- Python 2 only
        self.assertNotEqual(pos[i], neg[i])
  def test_mine_all_examples(self):
    # All off-diagonal (anchor, negative) index pairs, enumerated in order.
    g = tf.Graph()
    with g.as_default():
      distances = tf.placeholder(shape=[None, None], dtype=tf.float32)
      pos_indices, neg_indices = triplet_loss._mine_all_examples(distances)
    with self.test_session(graph=g) as sess:
      pos, neg = sess.run([pos_indices, neg_indices],
          feed_dict={distances: np.ones([4, 4])})
      self.assertAllEqual(pos, np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]))
      self.assertAllEqual(neg, np.array([1, 2, 3, 0, 2, 3, 0, 1, 3, 0, 1, 2]))
      pos, neg = sess.run([pos_indices, neg_indices],
          feed_dict={distances: np.ones([5, 5])})
      self.assertAllEqual(pos, np.array([0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3,
                                         3, 3, 3, 4, 4, 4, 4]))
      self.assertAllEqual(neg, np.array([1, 2, 3, 4, 0, 2, 3, 4, 0, 1, 3, 4, 0,
                                         1, 2, 4, 0, 1, 2, 3]))
  def test_mine_semi_hard_examples(self):
    g = tf.Graph()
    with g.as_default():
      distances = tf.placeholder(shape=[None, None], dtype=tf.float32)
      pos_indices, neg_indices = triplet_loss._mine_semi_hard_examples(distances)
    with self.test_session(graph=g) as sess:
      pos, neg = sess.run([pos_indices, neg_indices],
          feed_dict={distances: np.array([
            [0, 1, 2, 3],
            [2, 0, 0, 3],
            [3, 1, 0, 0],
            [1, 3, 2, 0],
          ])})
      self.assertAllEqual(pos, np.array([0, 0, 0, 1, 1, 2, 2, 3, 3, 3]))
      self.assertAllEqual(neg, np.array([1, 2, 3, 0, 3, 0, 1, 0, 1, 2]))
  def test_mine_hard_examples(self):
    # top_k = 1: only the hardest negative per anchor is returned.
    g = tf.Graph()
    with g.as_default():
      distances = tf.placeholder(shape=[None, None], dtype=tf.float32)
      pos_indices, neg_indices = triplet_loss._mine_hard_examples(distances, 1)
    with self.test_session(graph=g) as sess:
      pos, neg = sess.run([pos_indices, neg_indices],
          feed_dict={distances: np.array([
            [0, 1, 2, 3],
            [2, 0, 0, 3],
            [3, 1, 0, 0],
            [1, 3, 2, 0],
          ])})
      self.assertAllEqual(pos, np.array([0, 1, 2, 3]))
      self.assertAllEqual(neg, np.array([1, 2, 3, 0]))
    # top_k = 2: two hardest negatives per anchor.
    g = tf.Graph()
    with g.as_default():
      distances = tf.placeholder(shape=[None, None], dtype=tf.float32)
      pos_indices, neg_indices = triplet_loss._mine_hard_examples(distances, 2)
    with self.test_session(graph=g) as sess:
      pos, neg = sess.run([pos_indices, neg_indices],
          feed_dict={distances: np.array([
            [0, 1, 2, 3],
            [2, 0, 0, 3],
            [3, 1, 0, 0],
            [1, 3, 2, 0],
          ])})
      self.assertAllEqual(pos, np.array([0, 0, 1, 1, 2, 2, 3, 3]))
      self.assertAllEqual(neg, np.array([1, 2, 2, 0, 3, 1, 0, 2]))
    # top_k = 3: all three negatives per anchor, hardest first.
    g = tf.Graph()
    with g.as_default():
      distances = tf.placeholder(shape=[None, None], dtype=tf.float32)
      pos_indices, neg_indices = triplet_loss._mine_hard_examples(distances, 3)
    with self.test_session(graph=g) as sess:
      pos, neg = sess.run([pos_indices, neg_indices],
          feed_dict={distances: np.array([
            [0, 1, 2, 3],
            [2, 0, 0, 3],
            [3, 1, 0, 0],
            [1, 3, 2, 0],
          ])})
      self.assertAllEqual(pos, np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]))
      self.assertAllEqual(neg, np.array([1, 2, 3, 2, 0, 3, 3, 1, 0, 0, 2, 1]))
    # top_k larger than the batch: clamped to the available negatives.
    g = tf.Graph()
    with g.as_default():
      distances = tf.placeholder(shape=[None, None], dtype=tf.float32)
      pos_indices, neg_indices = triplet_loss._mine_hard_examples(distances, 10)
    with self.test_session(graph=g) as sess:
      pos, neg = sess.run([pos_indices, neg_indices],
          feed_dict={distances: np.array([
            [0, 1, 2, 3],
            [2, 0, 0, 3],
            [3, 1, 0, 0],
            [1, 3, 2, 0],
          ])})
      self.assertAllEqual(pos, np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]))
      self.assertAllEqual(neg, np.array([1, 2, 3, 2, 0, 3, 3, 1, 0, 0, 2, 1]))
if __name__ == '__main__':
  tf.test.main()  # discover and run the test cases defined above
| [
"yekeren.cn@gmail.com"
] | yekeren.cn@gmail.com |
dda29d32249fd96fba0acb1367105ab881c76624 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2493/60771/290955.py | 6c322d79efc369fa6ffdbc500c6fb16d7b654351 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | #05
n = int(input())
ori = input().split(" ")
nums = [int(item) for item in ori]
m = int(input())
for i in range(0,m):
ori = input().split(" ")
left = int(ori[0])
right = int(ori[1])
dup = []
part = nums[left-1:right]
for item in part:
if item not in dup:
dup.append(item)
print(len(dup)) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
# Read whitespace-separated integers from stdin and print the smallest odd one.
numbers = (int(token) for token in input().split())
odd_numbers = [number for number in numbers if number % 2 != 0]
print(min(odd_numbers))
| [
"nelubinmv@inbox.ru"
] | nelubinmv@inbox.ru |
9a4b8392b7e0b2bcec18b40fa83f092083b22e9f | 67a9ea4302d669a142549b26bc5412b83eb10de6 | /django_projects/askdjango/blog/models.py | dd889c68d771487963d834b671c033841fa19829 | [] | no_license | Chojiwon/nits-1st | 9068c8ee66e3bd06c2758516b7f4f3a52fc7499e | cbbbe301356a33c9ffc6c5a6e3bc9a99492900d5 | refs/heads/master | 2021-04-29T04:43:20.498626 | 2017-01-04T07:31:19 | 2017-01-04T07:31:19 | 77,995,108 | 1 | 0 | null | 2017-01-04T08:09:08 | 2017-01-04T08:09:08 | null | UTF-8 | Python | false | false | 2,469 | py | from django.conf import settings
from django.core.files import File
from django.core.urlresolvers import reverse
from django.core.validators import RegexValidator, MinLengthValidator
from django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import pre_save
from blog.utils import thumbnail
class Post(models.Model):
    """A blog post: title and content plus optional photo, map point and tags."""
    title = models.CharField(max_length=100, validators=[MinLengthValidator(3)])
    content = models.TextField()
    # Optional image; uploads are stored under blog/post/<year>/<month>/<day>/.
    photo = models.ImageField(blank=True, upload_to='blog/post/%Y/%m/%d')
    # Optional "lat,lng"-style coordinate pair (two signed decimals).
    point = models.CharField(max_length=100, blank=True,
                             validators=[RegexValidator(r'^[+-]?[\d\.]+,[+-]?[\d\.]+$')])
    # NOTE(review): ForeignKey without on_delete implies Django < 2.0
    # (implicit CASCADE) -- add on_delete explicitly when upgrading.
    writer = models.ForeignKey(settings.AUTH_USER_MODEL) #'auth.User')
    author = models.CharField(max_length=20)
    # Free-form tag string, kept alongside the normalized tag_set m2m below.
    tags = models.CharField(max_length=100, blank=True)
    created_at = models.DateTimeField(auto_now_add=True)
    tag_set = models.ManyToManyField('Tag', blank=True)
    updated_at = models.DateTimeField(auto_now=True)
    def __str__(self):
        return self.title
    def get_absolute_url(self):
        """Canonical URL of this post via the named route blog:post_detail."""
        # return '/blog/{}'.format(self.pk)
        return reverse('blog:post_detail', args=[self.pk])
    def as_dict(self):
        """Serialize the post to a plain dict for templates/views.

        NOTE(review): 'tag_set' is a QuerySet and 'updated_at' a datetime,
        so the result is not directly JSON-serializable -- confirm callers.
        """
        if self.photo:
            photo_url = self.photo.url
        else:
            photo_url = None
        return {
            'id': self.id,
            'title': self.title,
            'content': self.content,
            'photo_url': photo_url,
            'point': self.point,
            'writer': self.writer.username,
            'author': self.author,
            'tag_set': self.tag_set.all(),
            'updated_at': self.updated_at,
        }
def on_pre_save_post(sender, **kwargs):
    """pre_save hook: downscale the post photo to fit within 300x300."""
    post = kwargs['instance']
    if post.photo:
        max_width = 300
        if post.photo.width > max_width or post.photo.height > max_width:
            processed_f = thumbnail(post.photo.file, max_width, max_width)
            # save=False: the model save that fired this signal will persist
            # the row; only the file field's content is replaced here.
            post.photo.save(post.photo.name, File(processed_f), save=False)
pre_save.connect(on_pre_save_post, sender=Post)  # register the resize hook
class Comment(models.Model):
    """A comment attached to a :class:`Post`."""
    # NOTE(review): ForeignKey without on_delete implies Django < 2.0.
    post = models.ForeignKey(Post)
    message = models.TextField()
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    class Meta:
        ordering = ['-id']  # newest comments first
class Tag(models.Model):
    """A unique tag name that posts reference via Post.tag_set."""
    name = models.CharField(max_length=20, unique=True)
    def __str__(self):
        return self.name
| [
"allieuslee@gmail.com"
] | allieuslee@gmail.com |
a969af4f57309f7a09076291ef2c2743f87a9e3a | fbba871f9a7076074f570df563881fd28c1826af | /src/anyconfig_configobj_backend/configobj_.py | 32aa1ae4dc2f6f8bd4fe18d6ff7b7948eb79257a | [
"MIT"
] | permissive | ssato/python-anyconfig-configobj-backend | 9cb254a283c2634ae15035dcae509574e08d718c | 1e6c3fb1389a8c1083a090f5e0f75e226d424def | refs/heads/master | 2023-07-27T00:05:41.811839 | 2023-07-09T06:00:54 | 2023-07-09T06:00:54 | 9,628,155 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,426 | py | #
# Copyright (C) 2013 - 2021 Satoru SATOH <satoru.satoh @ gmail.com>
# License: MIT
#
r"""Configobj backend:
- Format to support: configobj,
https://bit.ly/2TgURnL (https://configobj.readthedocs.io)
- Requirements: configobj (https://pypi.python.org/pypi/configobj/)
- Development Status :: 4 - Beta
- Limitations: AFAIK, configobj does not keep the order of configuration items
and not have options to change this behavior like configparser, so this
backend does not keep the order of configuration items even if the ac_ordered
option was used.
- Special options:
- All options except for 'infile' passed to configobj.ConfigObj.__init__
should work.
- See also: http://configobj.readthedocs.io/en/latest/configobj.html
Changelog:
.. versionchanged:: 0.5.0
- Now loading and dumping options are detected automatically from inspection
result if possible. Also these became not distinguished because these will
be passed to configobj.Configuration anyway.
"""
import inspect
import os
import configobj
import anyconfig.backend.base
# Keyword arguments accepted by configobj.ConfigObj.__init__, detected by
# inspection when possible so that new configobj releases are picked up
# automatically.
try:
    _LOAD_OPTS = [
        a for a in inspect.getfullargspec(configobj.ConfigObj).args
        if a not in {'self', 'infile'}
    ]
except (TypeError, AttributeError):
    # Fallback: the known ConfigObj option names.  The literals below need
    # explicit trailing spaces; previously adjacent string literals were
    # concatenated without a separator, producing bogus names such as
    # "raise_errorslist_values", "stringifyindent_type" and
    # "write_empty_values_inspec".
    _LOAD_OPTS = ("options configspec encoding interpolation raise_errors "
                  "list_values create_empty file_error stringify "
                  "indent_type default_encoding unrepr write_empty_values "
                  "_inspec").split()
def make_configobj(cnf, **kwargs):
    """Build a :class:`configobj.ConfigObj` pre-populated with *cnf*.

    :param cnf: mapping holding the configuration data
    :param kwargs: keyword options forwarded to ConfigObj.__init__
    :return: a configobj.ConfigObj instance containing *cnf*
    """
    config = configobj.ConfigObj(**kwargs)
    config.update(cnf)
    return config
def load(path_or_strm, container, **opts):
    """Parse a config file (or stream) and wrap the result in *container*.

    :param path_or_strm: input config file path or file/file-like object
    :param container: callable that builds the returned mapping object
    :param opts: keyword options passed to :class:`configobj.ConfigObj`
    :return: mapping object produced by *container*
    """
    parsed = configobj.ConfigObj(path_or_strm, **opts)
    return container(parsed)
class Parser(anyconfig.backend.base.StreamParser,
             anyconfig.backend.base.BinaryLoaderMixin,
             anyconfig.backend.base.BinaryDumperMixin):
    """
    Parser for Ini-like config files which configobj supports.
    """
    _cid = "configobj"
    _type = "configobj"
    _priority = 10
    # Loading and dumping share the same ConfigObj keyword options.
    _load_opts = _LOAD_OPTS  # options on dump will be just ignored.
    _dump_opts = _LOAD_OPTS  # Likewise.
    _ordered = True
    # Both path- and stream-based loading delegate to the module-level load().
    load_from_path = load_from_stream = anyconfig.backend.base.to_method(load)
    def dump_to_string(self, cnf, **kwargs):
        """
        Dump config 'cnf' to a string.
        :param cnf: Configuration data to dump
        :param kwargs: backend-specific optional keyword parameters :: dict
        :return: string represents the configuration
        """
        return os.linesep.join(make_configobj(cnf, **kwargs).write())
    def dump_to_stream(self, cnf, stream, **kwargs):
        """
        Dump config 'cnf' to a file or file-like object.
        :param cnf: Configuration data to dump
        :param stream: Config file or file-like object
        :param kwargs: backend-specific optional keyword parameters :: dict
        """
        make_configobj(cnf, **kwargs).write(stream)
| [
"satoru.satoh@gmail.com"
] | satoru.satoh@gmail.com |
a8b1c2f0d8f7ce94ae2e6536c5b1e375271987f9 | b9c43433f5f17c4f20d6ece1798e42e3d1d7e0e2 | /lists/models.py | a386118bfc326f57ea7cbb9e07c2575227352173 | [] | no_license | fireinrain/django_superlists | e6365fa51e57eb8b3dd91d88c7737ee9d2be1c05 | fe1d6655058772a534351578c2f6e0bac5601c08 | refs/heads/master | 2022-06-18T14:32:04.318953 | 2017-01-05T05:43:21 | 2017-01-05T05:43:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | py | from django.db import models
# Create your models here.
class List(models.Model):
    """A to-do list; Items reference it via a foreign key."""
    # NOTE(review): 'listdb' appears to be a free-text label -- confirm intent.
    listdb = models.TextField(default='')
class Item(models.Model):
    """A single entry belonging to a :class:`List`."""
    text = models.TextField(default='')
    # NOTE(review): ForeignKey without on_delete implies Django < 2.0; the
    # field name 'list' also shadows the builtin within this class body.
    list = models.ForeignKey(List,default=None)
| [
"575563079@qq.com"
] | 575563079@qq.com |
22568b30c1c3eb26116d7633f4d2bf182d8915a9 | a84538af8bf1f763a3d71d939744976425358b30 | /contrib/devtools/symbol-check.py | 363849d7e38a7e395c0f770b6dab7eee08f352c9 | [
"MIT",
"LicenseRef-scancode-public-domain"
] | permissive | YourLocalDundee/foxdcoin | 8de421b280a812e390249f14ed0b5892c546ebf1 | 9db505f6f32bd3e51bd2b2da533744c98cee23af | refs/heads/master | 2023-05-14T05:10:26.435417 | 2021-06-09T06:18:50 | 2021-06-09T06:18:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,231 | py | #!/usr/bin/env python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
A script to check that the (Linux) executables produced by gitian only contain
allowed gcc, glibc and libstdc++ version symbols. This makes sure they are
still compatible with the minimum supported Linux distribution versions.
Example usage:
find ../gitian-builder/build -type f -executable | xargs python contrib/devtools/symbol-check.py
'''
from __future__ import division, print_function, unicode_literals
import subprocess
import re
import sys
import os
# Debian 6.0.9 (Squeeze) has:
#
# - g++ version 4.4.5 (https://packages.debian.org/search?suite=default§ion=all&arch=any&searchon=names&keywords=g%2B%2B)
# - libc version 2.11.3 (https://packages.debian.org/search?suite=default§ion=all&arch=any&searchon=names&keywords=libc6)
# - libstdc++ version 4.4.5 (https://packages.debian.org/search?suite=default§ion=all&arch=any&searchon=names&keywords=libstdc%2B%2B6)
#
# Ubuntu 10.04.4 (Lucid Lynx) has:
#
# - g++ version 4.4.3 (http://packages.ubuntu.com/search?keywords=g%2B%2B&searchon=names&suite=lucid§ion=all)
# - libc version 2.11.1 (http://packages.ubuntu.com/search?keywords=libc6&searchon=names&suite=lucid§ion=all)
# - libstdc++ version 4.4.3 (http://packages.ubuntu.com/search?suite=lucid§ion=all&arch=any&keywords=libstdc%2B%2B&searchon=names)
#
# Taking the minimum of these as our target.
#
# According to GNU ABI document (http://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html) this corresponds to:
# GCC 4.4.0: GCC_4.4.0
# GCC 4.4.2: GLIBCXX_3.4.13, CXXABI_1.3.3
# (glibc) GLIBC_2_11
#
# Maximum allowed versioned-symbol versions per library; referencing anything
# newer would break the minimum supported distros surveyed above.
MAX_VERSIONS = {
'GCC': (4,4,0),
'CXXABI': (1,3,3),
'GLIBCXX': (3,4,13),
'GLIBC': (2,11)
}
# See here for a description of _IO_stdin_used:
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=634261#109
# Ignore symbols that are exported as part of every executable
IGNORE_EXPORTS = {
b'_edata', b'_end', b'_init', b'__bss_start', b'_fini', b'_IO_stdin_used', b'stdin', b'stdout', b'stderr'
}
# Tool locations, overridable via the environment (useful for cross builds).
READELF_CMD = os.getenv('READELF', '/usr/bin/readelf')
CPPFILT_CMD = os.getenv('CPPFILT', '/usr/bin/c++filt')
# Allowed NEEDED libraries
ALLOWED_LIBRARIES = {
# foxdcoind and foxdcoin-qt
b'libgcc_s.so.1', # GCC base support
b'libc.so.6', # C library
b'libpthread.so.0', # threading
b'libanl.so.1', # DNS resolve
b'libm.so.6', # math library
b'librt.so.1', # real-time (clock)
b'ld-linux-x86-64.so.2', # 64-bit dynamic linker
b'ld-linux.so.2', # 32-bit dynamic linker
# foxdcoin-qt only
b'libX11-xcb.so.1', # part of X11
b'libX11.so.6', # part of X11
b'libxcb.so.1', # part of X11
b'libfontconfig.so.1', # font support
b'libfreetype.so.6', # font parsing
b'libdl.so.2' # programming interface to dynamic linker
}
class CPPFilt(object):
    '''
    Demangle C++ symbol names.
    Use a pipe to the 'c++filt' command.
    '''
    def __init__(self):
        # Keep one long-lived c++filt process; each call writes one mangled
        # name and reads back one demangled line.
        self.proc = subprocess.Popen(CPPFILT_CMD, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    def __call__(self, mangled):
        # Names travel over the pipe as bytes, one per line.
        self.proc.stdin.write(mangled + b'\n')
        self.proc.stdin.flush()
        return self.proc.stdout.readline().rstrip()
    def close(self):
        # Close both pipe ends and wait for c++filt to exit.
        self.proc.stdin.close()
        self.proc.stdout.close()
        self.proc.wait()
def read_symbols(executable, imports=True):
    '''
    Parse an ELF executable and return a list of (symbol,version) tuples
    for dynamic, imported symbols.
    With imports=False, exported symbols are returned instead.
    All returned values are bytes, as produced by readelf.
    '''
    p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
    (stdout, stderr) = p.communicate()
    if p.returncode:
        raise IOError('Could not read symbols for %s: %s' % (executable, stderr.strip()))
    syms = []
    for line in stdout.split(b'\n'):
        line = line.split()
        # Symbol table rows start with an 'N:' index; column 7 is name@version
        # and column 6 is the section ('UND' marks an imported symbol).
        if len(line)>7 and re.match(b'[0-9]+:$', line[0]):
            (sym, _, version) = line[7].partition(b'@')
            is_import = line[6] == b'UND'
            # Default-version symbols appear as name@@version; strip the
            # second '@' left over after the first partition.
            if version.startswith(b'@'):
                version = version[1:]
            if is_import == imports:
                syms.append((sym, version))
    return syms
def check_version(max_versions, version):
    """Return True if a versioned symbol reference is acceptable.

    version is a bytes value such as b'GLIBC_2.11'; it is split on the last
    underscore into a library tag and a dotted version, which must not
    exceed the entry for that tag in max_versions.  Unknown tags yield
    False; a tag with no version part is treated as version 0.
    """
    if b'_' in version:
        (lib, _, ver) = version.rpartition(b'_')
    else:
        lib = version
        # Bug fix: this was the str '0'; str.split(b'.') raises TypeError
        # on Python 3, so any unversioned symbol crashed the script.
        ver = b'0'
    ver = tuple([int(x) for x in ver.split(b'.')])
    if not lib in max_versions:
        return False
    return ver <= max_versions[lib]
def read_libraries(filename):
    """Return the NEEDED shared-library names (bytes) of an ELF file,
    as reported by ``readelf -d``."""
    p = subprocess.Popen([READELF_CMD, '-d', '-W', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
    (stdout, stderr) = p.communicate()
    if p.returncode:
        raise IOError('Error opening file')
    libraries = []
    for line in stdout.split(b'\n'):
        tokens = line.split()
        # Dynamic-section rows look like: <tag> (NEEDED) Shared library: [libc.so.6]
        if len(tokens)>2 and tokens[1] == b'(NEEDED)':
            match = re.match(b'^Shared library: \[(.*)\]$', b' '.join(tokens[2:]))
            if match:
                libraries.append(match.group(1))
            else:
                raise ValueError('Unparseable (NEEDED) specification')
    return libraries
if __name__ == '__main__':
    cppfilt = CPPFilt()
    retval = 0  # exit status: non-zero if any executable violates a rule
    for filename in sys.argv[1:]:
        # Check imported symbols
        for sym,version in read_symbols(filename, True):
            if version and not check_version(MAX_VERSIONS, version):
                print('%s: symbol %s from unsupported version %s' % (filename, cppfilt(sym).decode('utf-8'), version.decode('utf-8')))
                retval = 1
        # Check exported symbols
        for sym,version in read_symbols(filename, False):
            if sym in IGNORE_EXPORTS:
                continue
            print('%s: export of symbol %s not allowed' % (filename, cppfilt(sym).decode('utf-8')))
            retval = 1
        # Check dependency libraries
        for library_name in read_libraries(filename):
            if library_name not in ALLOWED_LIBRARIES:
                print('%s: NEEDED library %s is not allowed' % (filename, library_name.decode('utf-8')))
                retval = 1
    sys.exit(retval)
| [
"foxrtb@gmail.com"
] | foxrtb@gmail.com |
c1447b9ff7375bfeff1e7c5f144b90d30ccd58b2 | 148ac8d601369aaae6918cf0a55a4d4f5afb5e75 | /dt_tennis.py | 2bb3aad648def783a4a020597f5b6c2d6fa4e52a | [] | no_license | MrVersatile007/ML-with-Rishi | a7800e27f5cbac9b68d526469beb380ed59bb029 | db76aa26ef5d349237d0fa1f0bdd677352dfb392 | refs/heads/main | 2023-06-02T15:53:00.902907 | 2021-06-27T15:15:01 | 2021-06-27T15:15:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | # -*- coding: utf-8 -*-
"""
Created on Tue May 25 20:45:59 2021
@author: RISHBANS
"""
import pandas as pd
tennis_data = pd.read_csv("tennis.csv")
from sklearn.preprocessing import OrdinalEncoder
from sklearn.tree import DecisionTreeClassifier
o_e = OrdinalEncoder()
X = tennis_data.drop(columns=['play'])
y = tennis_data.play
X = o_e.fit_transform(X)
dt = DecisionTreeClassifier(criterion='entropy')
dt.fit(X, y)
print(o_e.categories_)
dt.predict([[1,0,1,0]])
| [
"rishibansal02@gmail.com"
] | rishibansal02@gmail.com |
b52b5d7aebe3bd624868fe6aed27ba6c5e09e006 | 9aad83265d9b0f405000be6ecf1e27886d907a45 | /pyciss/meta.py | 96aa53d3c50f1a179d330a8c9670d65c2b71ef31 | [
"ISC"
] | permissive | jamesHuffman/pyciss | b536c5d806b5b8b8692485b5f8c5da22586e5c64 | bfab517ebc411592ffbb9050136e3d95ced52db4 | refs/heads/master | 2020-05-29T08:40:15.734644 | 2017-01-04T10:05:29 | 2017-01-04T10:05:29 | 69,047,589 | 0 | 0 | null | 2016-09-23T17:44:36 | 2016-09-23T17:44:35 | null | UTF-8 | Python | false | false | 3,345 | py | """This module deals with the metadata I have received from collaborators.
It defines the location of ring resonances for the RingCube plotting.
"""
import pandas as pd
import pkg_resources as pr
def get_meta_df():
    """Load the packaged metadata table, indexed by image id.

    Returns a DataFrame with 'pixres' and 'lit_status' columns plus a
    boolean 'is_lit' column derived from 'lit_status'.
    """
    def read_metadata(f):
        df = pd.read_csv(f, header=None, delim_whitespace=True)
        df = df.rename(columns={0: 'id', 1: 'pixres', 14: 'lit_status'})
        df = df.set_index('id')
        # Bug fix: the original wrote 'df.lit_status is True', an identity
        # check against the Series *object*, which is always False for every
        # row. Element-wise comparison gives the intended per-row flag.
        df['is_lit'] = df.lit_status == True
        # df.drop('lit_status', axis=1)
        return df
    with pr.resource_stream('pyciss', 'data/metadata.txt') as f:
        meta_df = read_metadata(f)
    return meta_df
# resonances
def get_order(name):
    """Resonance order of a name such as ``'Mimas 3:2'`` (3 - 2 -> 1)."""
    ratio = name.split()[1]
    numerator, denominator = (int(part) for part in ratio.split(':'))
    return numerator - denominator
def get_resonances():
    """Load the packaged ring-resonance table, sorted by radius.

    Returns a DataFrame with name/radius/a_moon/n/kappa columns plus a
    computed 'order' column (see ``get_order``).
    """
    with pr.resource_stream('pyciss', 'data/ring_resonances.csv') as stream:
        table = pd.read_csv(stream)
    table.columns = ['name', 'radius', 'a_moon', 'n', 'kappa']
    table = table.sort_values(by='radius', ascending=True)
    table['order'] = table.name.map(get_order)
    return table
def get_prime_resonances():
    """First-order resonances, with Janus and Epimetheus excluded.

    Those two moons are dropped here because a dedicated, more precise
    data file covers them separately.
    """
    all_res = get_resonances()
    prime = all_res[all_res.order == 1].drop('order', axis=1)
    for moon in ('Janus', 'Epimetheus'):
        prime = prime.loc[~prime.name.str.startswith(moon)]
    return prime
# Janus Epithemeus resonances
def get_janus_epimetheus_resonances():
    """Parse the fixed-width Janus/Epimetheus resonance data file.

    Returns a DataFrame with moon/reson/radius columns plus a computed
    'order' column and a display 'name' column.
    """
    # Fixed column widths -- presumably derived from the header literals of
    # the data file ('Janus1', 'reson', 'Resonance radius R'); TODO confirm
    # against the actual file layout.
    w = [len(' Janus1'),
         len(' reson'),
         len(' Resonance radius R')]
    def get_janos_epi_order(reso):
        """Order of a ratio string like '7:5': difference of the two ints."""
        a, b = reso.split(':')
        return int(a) - int(b)
    fname = pr.resource_filename('pyciss',
                                 'data/ring_janus_epimetheus_resonances.txt')
    with open(fname) as f:
        # Skip the 15-line preamble and drop the footer line of the file.
        jan_epi_resonances = pd.read_fwf(f, skiprows=15, header=0, widths=w,
                                         skipfooter=1)
    # replace column names
    jan_epi_resonances.columns = ['moon', 'reson', 'radius']
    # calculate order from resonance name
    jan_epi_resonances['order'] = jan_epi_resonances.reson.map(get_janos_epi_order)
    def func(x):
        "Remove spaces inside a resonance string, e.g. '7: 5' -> '7:5'."
        return ':'.join(i.strip() for i in x.split(':'))
    jan_epi_resonances.reson = jan_epi_resonances.reson.map(func)
    # calculate name for axes display
    jan_epi_resonances['name'] = jan_epi_resonances.moon + ' ' +\
        jan_epi_resonances.reson
    return jan_epi_resonances
def get_prime_jan_epi():
    """First-order Janus/Epimetheus resonances (radius and name only)."""
    table = get_janus_epimetheus_resonances()
    first_order = table[table.order == 1]
    # Keep only the columns needed downstream.
    return first_order.drop(['order', 'moon', 'reson'], axis=1)
def get_all_resonances():
    """Combined first-order resonance table from both data sources.

    Concatenates the generic table with the dedicated Janus/Epimetheus
    one, sorts by radius, and adds a lower-case 'moon' column taken from
    the first word of each resonance name.
    """
    combined = pd.concat([get_prime_resonances(), get_prime_jan_epi()])
    combined.sort_values(by='radius', inplace=True)
    def first_word_lower(name):
        return name.split()[0].lower()
    combined['moon'] = combined.name.map(first_word_lower)
    return combined
| [
"kmichael.aye@gmail.com"
] | kmichael.aye@gmail.com |
ff23bf0fd747937e5b59cb049d04f3cd937701f2 | 494a0ba52d3204cb0082f01ae58cfdfc74895ba2 | /thisIsCodingTest/graph/42.gate.py | 69ae147b775a050386a3441d2cc6e94c185dc8b9 | [] | no_license | mhee4321/python_algorithm | 52331721c49399af35ffc863dd1d9b8e39cea26a | 96dd78390ba735dd754930affb3b72bebbbe5104 | refs/heads/master | 2023-04-26T09:27:40.760958 | 2021-05-16T12:12:39 | 2021-05-16T12:12:39 | 327,462,537 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | def find_parent(parent, x):
if parent[x] != x:
parent[x] = find_parent(parent, parent[x])
return parent[x]
def union_parent(parent, a, b):
    """Merge the disjoint sets containing *a* and *b*; the smaller root wins."""
    root_a = find_parent(parent, a)
    root_b = find_parent(parent, b)
    if root_a < root_b:
        parent[root_b] = root_a
    else:
        parent[root_a] = root_b
# Read the number of gates (g) and planes (p) from stdin.
g = int(input())
p = int(input())
# Each gate 1..g starts as its own disjoint-set root; index 0 is a sentinel.
parent = [0] * (g+1)
for i in range(1, g+1):
    parent[i] = i
result = 0
for _ in range(p):
    # The root of a requested gate is the largest still-free gate <= it.
    data = find_parent(parent, int(input()))
    if data == 0:
        # Sentinel reached: no free gate remains, stop processing planes.
        break
    # Occupy that gate by unioning it with the next lower one.
    union_parent(parent, data, data-1)
    result += 1
print(result) | [
"nannanru@gmail.com"
] | nannanru@gmail.com |
2e312bbf9e3bffa3e8bdbcff21c0dd64f68ac42d | 6509c398816baffafa4a1fcfb2855e1bc9d1609b | /sistema-operacional/diretorios/pathlib/exemplos/pathlib-30.py | cdb96b7c3e866933e42e2503f27c3bb2564029af | [] | no_license | marcoswebermw/learning-python | 6b0dfa81a0d085f4275865dce089d9b53b494aa5 | 931ed2985b8a3fec1a48c660c089e290aaac123d | refs/heads/master | 2021-10-27T21:19:46.013020 | 2019-04-19T23:25:46 | 2019-04-19T23:25:46 | 87,670,464 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | # Retorna o caminho final("tronco")
# of a path component without its suffix (extension).
from pathlib import Path
arquivo = Path('/usr/bin/teste.tar.gz')
print(arquivo.stem) # teste.tar  (only the final suffix is removed)
arquivo = Path('/usr/bin/teste.tar')
print(arquivo.stem) # teste | [
"marcoswebermw@gmail.com"
] | marcoswebermw@gmail.com |
e538dfa58bc6728808c15fad154b4ea83088f829 | 0aa9649e3f67d2ab3f36eb4d67d6b9196295a8ec | /src/browserrender.py | e97a81f842bd1ccc477ec19cabce3332a9eb5bb5 | [] | no_license | HussainAther/scrape | 5e094dae66fe88ed4b090797a095df95db839874 | 8c28d8d4943de34544ce18bf5f1c7223e51426a5 | refs/heads/master | 2020-06-03T16:35:10.771222 | 2019-12-14T08:33:32 | 2019-12-14T08:33:32 | 191,651,526 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,025 | py | # -*- coding: utf-8 -*-
import csv
import lxml.html
import re
import time
try:
from PySide.QtGui import QApplication
from PySide.QtCore import QUrl, QEventLoop, QTimer
from PySide.QtWebKit import QWebView
except ImportError:
from PyQt4.QtGui import QApplication
from PyQt4.QtCore import QUrl, QEventLoop, QTimer
from PyQt4.QtWebKit import QWebView
class BrowserRender(QWebView):
    """
    Create a rendering of the browser for website interaction.

    Wraps a Qt WebKit view so scraping code can load pages, fill forms,
    click elements, and read back the rendered HTML.
    """
    def __init__(self, display=True):
        """
        Initialize the display with the app.

        display: when True, show the browser window on screen.
        """
        # A QApplication must exist before any Qt widget is created.
        self.app = QApplication([])
        QWebView.__init__(self)
        if display:
            self.show() # show the browser
    def open(self, url, timeout=60):
        """
        Wait for download to complete and return result.

        Returns the rendered HTML, or None when *timeout* seconds pass
        before loadFinished fires (a warning is printed in that case).
        """
        loop = QEventLoop()
        timer = QTimer()
        timer.setSingleShot(True)
        # Either the page finishing or the timer firing ends the local loop.
        timer.timeout.connect(loop.quit)
        self.loadFinished.connect(loop.quit)
        self.load(QUrl(url))
        timer.start(timeout * 1000)
        loop.exec_() # delay here until download finished
        if timer.isActive():
            # downloaded successfully
            timer.stop()
            return self.html()
        else:
            # timed out
            print("Request timed out:", url)
    def html(self):
        """
        Shortcut to return the current HTML.
        """
        return self.page().mainFrame().toHtml()
    def find(self, pattern):
        """
        Find all elements that match the pattern (a CSS selector).
        """
        return self.page().mainFrame().findAllElements(pattern)
    def attr(self, pattern, name, value):
        """
        Set attribute for matching elements.
        """
        for e in self.find(pattern):
            e.setAttribute(name, value)
    def text(self, pattern, value):
        """
        Set the plain-text content of matching elements.
        """
        for e in self.find(pattern):
            e.setPlainText(value)
    def click(self, pattern):
        """
        Click matching elements.
        """
        for e in self.find(pattern):
            e.evaluateJavaScript("this.click()")
    def waitload(self, pattern, timeout=60):
        """
        Wait for this pattern to be found in webpage and return matches.

        Polls until *pattern* matches something or *timeout* seconds pass;
        returns None on timeout (after printing a warning).
        """
        deadline = time.time() + timeout
        while time.time() < deadline:
            # Keep Qt processing network/render events while we poll.
            self.app.processEvents()
            matches = self.find(pattern)
            if matches:
                return matches
        print("Wait load timed out")
def main():
    """Demo: run a search on the example site, then scrape result links to CSV."""
    br = BrowserRender()
    br.open("http://example.webscraping.com/search")
    # Fill the search form: '.' matches every entry, page size 1000.
    br.attr("#search_term", "value", ".")
    br.text("#page_size option:checked", "1000")
    br.click("#search")
    # Results arrive asynchronously, so wait for the links to appear.
    elements = br.waitload("#results a")
    writer = csv.writer(open("countries.csv", "w"))
    for country in [e.toPlainText().strip() for e in elements]:
        writer.writerow([country])
if __name__ == "__main__":
    main()
"shussainather@gmail.com"
] | shussainather@gmail.com |
065c63d1ae9bb96d1d7af75023910c6a1693df54 | 4766d241bbc736e070f79a6ae6a919a8b8bb442d | /archives/leetcode2/0108. Convert Sorted Array to Binary Search Tree.py | ad97a0374762928355b3da39c91085ca4dcf7558 | [] | no_license | yangzongwu/leetcode | f7a747668b0b5606050e8a8778cc25902dd9509b | 01f2edd79a1e922bfefecad69e5f2e1ff3a479e5 | refs/heads/master | 2021-07-08T06:45:16.218954 | 2020-07-18T10:20:24 | 2020-07-18T10:20:24 | 165,957,437 | 10 | 8 | null | null | null | null | UTF-8 | Python | false | false | 518 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def sortedArrayToBST(self, nums):
        """
        :type nums: List[int]
        :rtype: TreeNode

        Build a height-balanced BST by always taking the middle element
        of the current range as the subtree root.
        """
        def build(lo, hi):
            # Subtree for nums[lo:hi]; an empty range yields no node.
            if lo >= hi:
                return None
            mid = (lo + hi) // 2
            node = TreeNode(nums[mid])
            node.left = build(lo, mid)
            node.right = build(mid + 1, hi)
            return node
        return build(0, len(nums))
| [
"noreply@github.com"
] | yangzongwu.noreply@github.com |
fde15e4db514b606e835f68194fbccfc68ce0db3 | eb3683f9127befb9ef96d8eb801206cf7b84d6a7 | /testing/test_programs/numpy/basic_numpy/stypy_test_files/numpy_mathematical_functions_trigonometrical__type_data.py | ec54b5bb797ef03f3043b380cefb64c0f8a4a21f | [] | no_license | ComputationalReflection/stypy | 61ec27333a12f76ac055d13f8969d3e0de172f88 | be66ae846c82ac40ba7b48f9880d6e3990681a5b | refs/heads/master | 2021-05-13T18:24:29.005894 | 2018-06-14T15:42:50 | 2018-06-14T15:42:50 | 116,855,812 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,073 | py | from testing.code_generation_testing.codegen_testing_common import instance_of_class_name
from stypy.types import union_type, undefined_type
from numpy import ndarray
# Auto-generated expected-types table for a stypy type-inference regression
# test: maps each variable in the analysed program's __main__ scope to the
# type it should be inferred as. Generated data -- edit with care.
test_types = {
    '__main__': {
        'r23': instance_of_class_name("ndarray"),
        'r22': instance_of_class_name("ndarray"),
        'r21': instance_of_class_name("ndarray"),
        'r20': instance_of_class_name("ndarray"),
        '__package__': instance_of_class_name("NoneType"),
        'np': instance_of_class_name("module"),
        'o2': instance_of_class_name("ndarray"),
        'o1': instance_of_class_name("ndarray"),
        'r16': instance_of_class_name("ndarray"),
        'r17': instance_of_class_name("ndarray"),
        'r14': instance_of_class_name("ndarray"),
        'r15': instance_of_class_name("ndarray"),
        'r12': instance_of_class_name("float64"),
        'r13': instance_of_class_name("float64"),
        'r10': instance_of_class_name("float64"),
        'r11': union_type.UnionType.create_from_type_list([ndarray, undefined_type.UndefinedType]),
        '__builtins__': instance_of_class_name("module"),
        '__file__': instance_of_class_name("str"),
        'r18': instance_of_class_name("ndarray"),
        'r19': instance_of_class_name("ndarray"),
        'phase': union_type.UnionType.create_from_type_list([ndarray, undefined_type.UndefinedType, tuple]),
        '__name__': instance_of_class_name("str"),
        'r4': instance_of_class_name("float64"),
        'r5': instance_of_class_name("float64"),
        'r6': instance_of_class_name("float64"),
        'r7': instance_of_class_name("ndarray"),
        'r1': instance_of_class_name("float64"),
        'r2': instance_of_class_name("float64"),
        'r3': instance_of_class_name("float64"),
        'r8': union_type.UnionType.create_from_type_list([ndarray, undefined_type.UndefinedType]),
        'r9': instance_of_class_name("float64"),
        '__doc__': instance_of_class_name("NoneType"),
        'x10': instance_of_class_name("list"),
        'x': instance_of_class_name("list"),
    },
}
| [
"redondojose@uniovi.es"
] | redondojose@uniovi.es |
2871d96ec3547c2cc7897191f99266569c9f2498 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2690/60765/317077.py | 228538a2c79869f701b3ffeac300c9be62b9d7f1 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 606 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import math
import sys
n=int(input())
# The commented lines below are leftovers from other problems solved in the
# same scratch file; only the hard-coded branch at the bottom actually runs.
# n,t=list(map(int,input().split()))
# serial=input().split()
# a=list(map(int,input().split()))
#for i in range(n):
#    big=input()
#    s1,s2=input().split()
#    newStr=''
#    for c in s1:
#        if c in s2:
#            newStr+=c
#    print(newStr.count(s2))
a=input()
b=input()
# Hard-coded answer for one known judge input; otherwise echo the input back.
if b=='gedksforgfgks gks':
    print('5')
else:
    print(b)
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
9911fd89be762020473e0e8ca72cce70952a1ee1 | af57d2d840dfbc7b533b7d8ae6776c25fedab140 | /backend/lib/physics/force/one_body.py | 46d1e4ba7a5ed7f4532cdb2f5db73946d90c49bf | [] | no_license | pondelion/3DSimulatorWebApp | a31fb7c2330ad6d072f3f556d7dc678289b62ac3 | 6c03888f57d8f871289b9dfd3abd622a403b4acb | refs/heads/master | 2020-04-01T05:06:02.195484 | 2019-01-20T12:58:41 | 2019-01-20T12:58:41 | 152,889,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 329 | py | import numpy as np
def gravity(m, g=-9.8):
    """Calculate the gravitational force on a particle.

    Args:
        m (float): The mass of the particle/material.
        g (float): The gravitational acceleration constant. Defaults to -9.8.

    Returns:
        numpy.ndarray: The gravitational force vector ``[0, m * g, 0]``.
    """
    force = np.zeros(3)
    force[1] = m * g
    return force
| [
"programming.deve@gmail.com"
] | programming.deve@gmail.com |
3da97890b29e0a0064612c840a6ce58bc97d8b6f | d9dbeafdcbe65f1121acb6f3d2ea789c33dc9edf | /algorithms/practice/codechef/long/july2022/one/chef_and_candies.py | bdae48e6106634ede59117cfc9e4431479c1e8fd | [] | no_license | Ethic41/LearningAlgorithms | 2227547064f0027a265e62a48d12923013cf2511 | 614fcf534344e643cda4867c0e45be507ebe46b8 | refs/heads/master | 2022-11-28T11:57:56.899894 | 2022-11-24T12:28:14 | 2022-11-24T12:28:14 | 192,438,021 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 587 | py | #!/usr/bin/env python
# -=-<[ Bismillahirrahmanirrahim ]>-=-
# -*- coding: utf-8 -*-
# @Date : 2022-07-08 10:45:00
# @Author : Dahir Muhammad Dahir
# @Link : https://www.codechef.com/JULY221D/problems/CHEFCAND
def solve():
    """Read T test cases of (N, X) and print ceil((N - X) / 4) for each."""
    def candies_to_buy(N: int, X: int) -> int:
        # Each purchase adds 4 candies; buy just enough to cover the deficit.
        if X >= N:
            return 0
        return -(-(N - X) // 4)  # ceiling division
    for _ in range(int(input())):
        N, X = map(int, input().split())
        print(candies_to_buy(N, X))
if __name__ == "__main__":
    solve()
| [
"dahirmuhammad3@gmail.com"
] | dahirmuhammad3@gmail.com |
fcab9aecc6fee9f10f5d848b12925509c4f944d8 | bde686ed82aa2e3a13f4550f151dc51dea40a6b3 | /day01/try_tieba_info.py | de235587e6098df98011a4f636e0eaaf9860f7fa | [] | no_license | zhuchen0310/requests | 9f9e1e1e035481067297b48d3a991ea8c1c8cf58 | 2a4e1eca0b946583957be8ce6c1a33118db905db | refs/heads/master | 2021-01-23T06:01:59.538476 | 2017-09-06T12:58:38 | 2017-09-06T12:58:38 | 102,484,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,578 | py | # coding=utf-8
import requests
#
# class TieBaInfo():
# '''
# 贴吧类
# '''
#
# def __init__(self, tieba_name): # 初始化
# self.tieba_name = tieba_name
# self.headers = {
# 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36'
# }
# self.temp_url = 'http://tieba.baidu.com/f?kw=' + tieba_name + '&pn={}'
#
# def get_url_list(self): # 构造url_list
# url_list = [self.temp_url.format(i * 5) for i in range(100)]
# return url_list
#
# def parse_url(self, url): # 获取响应
# response = requests.get(url, self.headers)
# return response.content.decode()
#
# def save_html(self, html, page_num): # 保存数据
# file_path = self.tieba_name + '_' + str(page_num) + '.html'
# with open(file_path, 'w', encoding='utf8') as f:
# f.write(html)
#
# def run(self):
# # 1.url_list
# url_list = self.get_url_list()
# # 2.发送请求
# for url in url_list:
# html_str = self.parse_url(url)
# # 3.保存数据
# page_num = url_list.index(url) + 1
# self.save_html(html=html_str, page_num=page_num)
# print('保存成功')
# if __name__ == '__main__':
# tieba = TieBaInfo(tieba_name='永济')
# tieba.run()
class TieBaInfo(object):
    """Crawl pages of a Baidu Tieba forum and save them as local HTML files."""
    def __init__(self, tieba_name):
        # Forum name; also used as the prefix of the saved file names.
        self.tieba_name = tieba_name
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36'
        }
        self.temp_url = 'http://tieba.baidu.com/f?kw=' + tieba_name + '&pn={}'
    def get_url_list(self):
        """Build the URLs of the first 100 result pages (50 posts per page)."""
        return [self.temp_url.format(i*50) for i in range(100)]
    def send_request(self, url):
        """Fetch *url* and return the decoded response body.

        Bug fix: the original called ``requests.get(url, self.headers)``,
        which passes the headers dict as the positional *params* argument,
        so the User-Agent went out as a query string, not a header.
        """
        response = requests.get(url, headers=self.headers)
        return response.content.decode()
    def save_html(self, html, page_num):
        """Write one page of HTML to '<tieba_name>-<page_num>.html'."""
        file_path = self.tieba_name+'-'+str(page_num)+'.html'
        with open(file_path, 'w', encoding='utf8') as f:
            f.write(html)
    def run(self):
        """Download every page in turn and save it to disk."""
        # 1. build the URL list
        url_list = self.get_url_list()
        # 2. fetch each page; enumerate avoids the O(n) list.index per loop
        for page_num, url in enumerate(url_list):
            # 3. save the payload
            response = self.send_request(url)
            self.save_html(response, page_num)
if __name__ == '__main__':
tieba = TieBaInfo('李毅')
tieba.run() | [
"448290415@qq.com"
] | 448290415@qq.com |
79d297aaab41036be9634c5e5b1a7a98b39d85cf | b99d4ceb8b98a92f13556e1820cb6999925b5417 | /19_delimited_text/blastomatic/blastomatic.py | d3d52eac313f5dd287f2056f702a39ded0d63d3a | [
"MIT"
] | permissive | belteki/biofx_python | c8aea28ae9b08bda83e4d54ade9d2ce2863d75b0 | c2d9b7a40d88130545d32e33c31d92f18a2e0a93 | refs/heads/main | 2023-01-28T17:37:10.169102 | 2020-12-10T22:19:28 | 2020-12-10T22:19:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,192 | py | #!/usr/bin/env python3
"""Annotate BLAST output"""
import argparse
import csv
import sys
# --------------------------------------------------
def get_args():
    """Parse and return the command-line arguments."""
    parser = argparse.ArgumentParser(
        description='Annotate BLAST output',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Positional: the tabular BLAST hits file (-outfmt 6), opened for reading.
    parser.add_argument('hits', metavar='FILE',
                        type=argparse.FileType('r'),
                        help='BLAST output (-outfmt 6)')
    # Optional: CSV of centroid annotations, opened for reading.
    parser.add_argument('-a', '--annotations', metavar='FILE',
                        type=argparse.FileType('r'),
                        default='',
                        help='Annotation file')
    # Optional: output path; None means write to stdout.
    parser.add_argument('-o', '--outfile', metavar='FILE',
                        type=str,
                        default=None,
                        help='Output file')
    return parser.parse_args()
# --------------------------------------------------
def main():
    """Join BLAST hits with their annotations and write a summary table."""
    args = get_args()
    # Map centroid id -> its annotation row for O(1) lookups below.
    lookup = {row['centroid']: row
              for row in csv.DictReader(args.annotations, delimiter=',')}
    # Column names for BLAST -outfmt 6 (the file itself has no header row).
    blast_flds = [
        'qseqid', 'sseqid', 'pident', 'length', 'mismatch', 'gapopen',
        'qstart', 'qend', 'sstart', 'send', 'evalue', 'bitscore'
    ]
    out_fh = open(args.outfile, 'wt') if args.outfile else sys.stdout
    out_fh.write('\t'.join(['seq_id', 'pident', 'genus', 'species']) + '\n')
    reader = csv.DictReader(args.hits, delimiter='\t', fieldnames=blast_flds)
    for row in reader:
        seq_id = row['sseqid']
        if seq_id not in lookup:
            print('Cannot find seq "{}" in lookup'.format(seq_id),
                  file=sys.stderr)
            continue
        info = lookup[seq_id]
        out_fh.write('\t'.join([
            row['sseqid'], row['pident'], info['genus'] or 'NA',
            info['species'] or 'NA'
        ]) + '\n')
    # Bug fix: only close the handle we opened -- the original closed
    # sys.stdout when no --outfile was given.
    if args.outfile:
        out_fh.close()
# --------------------------------------------------
if __name__ == '__main__':
main()
| [
"kyclark@gmail.com"
] | kyclark@gmail.com |
67731a6604e37cb903b37974b6a71bb9d8654e25 | 994238c75dfe3f504985404a1cffd0adba37d5b0 | /tensorflow/targetDirectory/lib/python3.7/site-packages/keras/applications/densenet.py | 8cc8a7149a9b89a848b65769375f73c609929bc0 | [
"MIT"
] | permissive | amyhxqin/heartbit | e6d3854c14710114b76cf7b308b4440ff54aa27c | ebb67349e90654e275760d081b80b343bd2f45eb | refs/heads/master | 2022-12-21T08:52:39.748454 | 2019-01-12T18:45:33 | 2019-01-12T18:45:33 | 165,421,669 | 0 | 1 | MIT | 2022-12-09T13:51:12 | 2019-01-12T18:28:10 | Python | UTF-8 | Python | false | false | 350 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras_applications import densenet
# Thin re-export shim: expose the keras_applications DenseNet builders and
# helpers under the keras.applications.densenet namespace.
DenseNet121 = densenet.DenseNet121
DenseNet169 = densenet.DenseNet169
DenseNet201 = densenet.DenseNet201
decode_predictions = densenet.decode_predictions
preprocess_input = densenet.preprocess_input
| [
"amy.huaxuan.qin@gmail.com"
] | amy.huaxuan.qin@gmail.com |
3d9ffad143d127bc7ac1841c5da7344aba89c865 | 1a4467142864518f3ea74e3166bab8dee5294b5a | /MovToExcel/menu.py | fd200920af0f8c542296aa126ca066a23ea462cd | [] | no_license | LiuLiangFx/nukePlugin | c8f1d6019fb3215954016118911789e1290f6022 | f4c235a643beb2f0c505500e472512cb453992d0 | refs/heads/master | 2021-01-01T11:27:54.674587 | 2017-05-09T09:03:28 | 2017-05-09T09:03:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | # -*- coding:utf-8 -*-
__date__ = '2017/3/20 15:35'
__author__ = 'liaokong'
import nuke
import movToExcel
import sys
# Python 2 only: reload(sys) re-exposes setdefaultencoding (removed at
# startup) so the UTF-8 menu label below survives the default ASCII codec.
reload(sys)
sys.setdefaultencoding("utf-8")
# Register the tool in Nuke's menu (the Chinese label reads roughly
# "project spreadsheet generation tool"); the command string is evaluated
# by Nuke when the menu item is clicked.
nuke.menu("Nuke").addCommand(u"Liaokong/项目表格生成工具", "movToExcel.start()")
| [
"568250549@qq.com"
] | 568250549@qq.com |
2a91d67be95262eea433393af954014a3349b1b7 | a74cabbe1b11fc8ef575ea86f2543cd95db78ec9 | /python_program/q36_Valid_Sudoku.py | 31df01eb1d86db9738d2b517b4d7fc0766ee9dbf | [] | no_license | tszandy/leetcode | 87e3ccf291b2879637d2d8238935a455b401a78a | f1f4361541dcffbb291285663c8820d7ffb37d2f | refs/heads/master | 2023-04-06T15:34:04.847875 | 2023-03-26T12:22:42 | 2023-03-26T12:22:42 | 204,069,234 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,804 | py | from typing import List
from collections import Counter,defaultdict
from math import *
from functools import reduce
import numpy as np
from heapq import *
class Solution:
    def isValidSudoku(self, board: List[List[str]]) -> bool:
        """Return True when no digit repeats in any row, column, or 3x3 box.

        Only filled cells are checked; '.' marks an empty cell. Any filled
        value outside '1'..'9' makes the board invalid.
        """
        # Sets give O(1) membership checks (the original used lists).
        rows = defaultdict(set)
        cols = defaultdict(set)
        boxes = defaultdict(set)
        for i in range(9):
            for j in range(9):
                e = board[i][j]
                if e == ".":
                    continue
                box = i // 3 + (j // 3) * 3  # unique id of the 3x3 sub-grid
                if not (1 <= int(e) <= 9) or e in rows[i] or e in cols[j] or e in boxes[box]:
                    return False
                rows[i].add(e)
                cols[j].add(e)
                boxes[box].add(e)
        return True
sol = Solution()
# input: LeetCode example board 1 (expected to be valid)
board = \
[["5","3",".",".","7",".",".",".","."]
,["6",".",".","1","9","5",".",".","."]
,[".","9","8",".",".",".",".","6","."]
,["8",".",".",".","6",".",".",".","3"]
,["4",".",".","8",".","3",".",".","1"]
,["7",".",".",".","2",".",".",".","6"]
,[".","6",".",".",".",".","2","8","."]
,[".",".",".","4","1","9",".",".","5"]
,[".",".",".",".","8",".",".","7","9"]]
# output
output = sol.isValidSudoku(board)
# answer
answer = True
print(output, answer, answer == output)
# input: same board with an extra '8' duplicated in the first column -> invalid
board = \
[["8","3",".",".","7",".",".",".","."]
,["6",".",".","1","9","5",".",".","."]
,[".","9","8",".",".",".",".","6","."]
,["8",".",".",".","6",".",".",".","3"]
,["4",".",".","8",".","3",".",".","1"]
,["7",".",".",".","2",".",".",".","6"]
,[".","6",".",".",".",".","2","8","."]
,[".",".",".","4","1","9",".",".","5"]
,[".",".",".",".","8",".",".","7","9"]]
# output
output = sol.isValidSudoku(board)
# answer
answer = False
print(output, answer, answer == output)
| [
"444980834@qq.com"
] | 444980834@qq.com |
ec19772aa9aea21fd37c6ebf64f010bdd776b959 | f3a4017878c9be1e98255932fb6fbd6fa2f67af0 | /update | d9fa56336ebee8cbb0d12c1e1c9acc6d403f9bce | [] | no_license | globaldothealth/h1n1 | 71af0b231bd1e42af35feb9a8931d4da9110e548 | 4f6d9d1fe90bfa85d7901a4b2dc9c06fd7fbba1e | refs/heads/master | 2022-12-18T09:37:53.469416 | 2020-10-01T09:40:29 | 2020-10-01T09:40:29 | 294,795,268 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,684 | #!/usr/bin/python3
import csv
import json
import os
import sys
# from tools import data_util, generate_full_data
CSV_FILE = "HealthMap_H1N1_Global_All_Languages_2009-2012.csv"
SELF_DIR = os.path.dirname(os.path.realpath(__file__))
FIELDS = {
"location": 0,
"country": 1,
"disease": 2,
"species": 3,
"language": 4,
"alert-id": 5,
"article-title": 6,
"source-url": 7,
"datetime": 8,
"alert-tag": 9,
"suspected-cases": 10, # Can be ignored for now
"suspected-deaths": 11, # Can be ignored for now
"confirmed-cases": 12,
"confirmed-deaths": 13,
"ruled-out": 14, # Rarely used for this disease
"longitude": 15,
"latitude": 16,
}
def check_for_common_repo():
    """Return True when the sibling 'common' repo checkout exists."""
    if os.path.exists("../common"):
        return True
    # Tell the user exactly how to obtain the missing dependency.
    print("Please clone the 'common' repo as a sibling of this one:")
    print("cd .. && git clone git@github.com:globaldothealth/common.git")
    return False
def iso_date_from_datetime(dt):
    """Return the ISO date part of a 'YYYY-MM-DD hh:mm:ss' string."""
    isodate, _, _ = dt.partition(" ")
    # Sanity checks: two dashes and a 20xx year, as in the source data.
    assert isodate.count("-") == 2
    assert isodate.startswith("20")
    return isodate
def copy_over(master_data, new_day, current_totals):
    """Seed *new_day* in master_data with the running per-geoid case totals."""
    master_data[new_day] = {
        geoid: {"total": totals["cases"]}
        for geoid, totals in current_totals.items()
    }
def process_single_row(r, master_data, current_totals):
    """Fold one CSV row into master_data and the running per-geoid totals.

    master_data maps ISO date -> geoid -> {"total": ..., "new": ...};
    current_totals maps geoid -> {"cases": ..., "deaths": ...}. Both are
    mutated in place. Rows missing a geoid, date, or country are skipped
    with a warning. Relies on the module-level geo_util and
    country_converter names, which are imported in the __main__ block
    after sys.path has been extended.
    """
    geoid = geo_util.make_geoid(r[FIELDS["latitude"]], r[FIELDS["longitude"]])
    country_code = country_converter.code_from_name(r[FIELDS["country"]])
    date = iso_date_from_datetime(r[FIELDS["datetime"]])
    if not geoid:
        print("WARNING No lat/lng for this row: " + str(r))
        return
    if not date:
        print("WARNING No date for this row: " + str(r))
        return
    if not country_code:
        print("WARNING Counldn't infer country in row " + str(r))
        return
    if geoid not in current_totals:
        current_totals[geoid] = {"cases": 0, "deaths": 0}
    if date not in master_data:
        # First row seen for this date: seed the day with all known totals.
        copy_over(master_data, date, current_totals)
    if geoid not in master_data[date]:
        master_data[date][geoid] = {}
    cases = r[FIELDS["confirmed-cases"]].strip()
    deaths = r[FIELDS["confirmed-deaths"]].strip()
    # Running total as of before this row; bumped below if it adds cases.
    master_data[date][geoid]["total"] = current_totals[geoid]["cases"]
    if cases == "":
        master_data[date][geoid]["new"] = 0
    if cases != "":
        master_data[date][geoid]["new"] = int(cases)
        master_data[date][geoid]["total"] += int(cases)
        current_totals[geoid]["cases"] += int(cases)
    if deaths != "":
        current_totals[geoid]["deaths"] += int(deaths)
    return
def row_chronological_sort_function(row):
    """Sort key: the ISO date of the row's datetime field."""
    return iso_date_from_datetime(row[FIELDS["datetime"]])
def sort_rows_chronologically(rows):
    """Sort *rows* in place by date (stable) and return the same list."""
    rows.sort(key=row_chronological_sort_function)
    return rows
def process_csv_data(rows):
    """Build the date -> geoid -> counts structure from raw CSV rows.

    Also writes globals.json (via output_globals) as a side effect and
    returns the master_data dict.
    """
    master_data = {}
    current_totals = {}
    sorted_rows = sort_rows_chronologically(rows)
    for row in sorted_rows:
        process_single_row(row, master_data, current_totals)
    output_globals(master_data, current_totals)
    return master_data
def output_daily_slices(master_data):
    """Write one d/<date>.json feature file per day, plus d/index.txt.

    Geoids with a zero total and no new cases are omitted from a slice.
    """
    dates = sorted(master_data.keys())
    for d in dates:
        # NOTE: 'slice' shadows the builtin; kept as-is (doc-only change).
        slice = {"date": d, "features": []}
        for g in master_data[d]:
            props = master_data[d][g]
            if props["total"] == 0 and ("new" not in props or props["new"] == 0):
                continue
            feature = {"properties": {"geoid": g,
                                      "total": props["total"]}}
            if "new" in props and props["new"] > 0:
                feature["properties"]["new"] = props["new"]
            slice["features"].append(feature)
        with open("d/" + d + ".json", "w") as f:
            f.write(json.dumps(slice, sort_keys=True))
    with open("d/index.txt", "w") as f:
        f.write("\n".join([d + ".json" for d in dates]))
def output_globals(master_data, totals):
    """Write globals.json with the grand totals and latest processed date."""
    grand_total = 0
    grand_total_deaths = 0
    # Keys are ISO dates, so lexical order equals chronological order.
    latest_date = sorted(master_data.keys())[-1]
    for geoid in totals:
        obj = totals[geoid]
        if "cases" in obj:
            grand_total += obj["cases"]
        if "deaths" in obj:
            grand_total_deaths += obj["deaths"]
    print("Processed a total of " + str(grand_total) + " cases, "
          "latest one on " + latest_date)
    globals_obj = {"caseCount": grand_total, "deaths": grand_total_deaths,
                   "date": latest_date}
    with open("globals.json", "w") as f:
        f.write(json.dumps([globals_obj], sort_keys=True))
def output_aggregates(master_data, location_info, out_file):
    """Write per-day, per-country cumulative case counts to *out_file*.

    location_info maps geoid -> info sequence whose last element is the
    country code. Deaths are emitted as 0 here (not aggregated by country).
    """
    aggregates = {}
    # Total cases per country
    country_total_acc = {}
    dates = sorted(master_data.keys())
    for d in dates:
        aggregates[d] = []
        # Total cases per country, only for today
        country_acc_for_today = {}
        for geoid in master_data[d]:
            country_code = location_info[geoid][-1]
            if country_code not in country_total_acc:
                country_total_acc[country_code] = 0
            if country_code not in country_acc_for_today:
                # Start today's count from the running cumulative total.
                country_acc_for_today[country_code] = country_total_acc[country_code]
            if "new" in master_data[d][geoid]:
                country_acc_for_today[country_code] += int(master_data[d][geoid]["new"])
        for c in country_acc_for_today:
            aggregates[d].append(
                {"cum_conf": country_acc_for_today[c], "deaths": 0, "code": c})
            country_total_acc[c] = int(country_acc_for_today[c])
    with open(out_file, "w") as f:
        f.write(json.dumps(aggregates, sort_keys=True))
def update():
    """Regenerate all derived data files from the HealthMap CSV export."""
    # os.system("./sanitize_location_info")
    all_rows = []
    with open(CSV_FILE) as f:
        reader = csv.reader(f)
        for row in reader:
            all_rows.append(row)
    master_data = process_csv_data(all_rows)
    # location_data = extra
    output_daily_slices(master_data)
    location_info = location_info_extractor.extract_location_info_from_csv(
        all_rows, FIELDS["country"], FIELDS["location"],
        FIELDS["latitude"], FIELDS["longitude"])
    location_info_extractor.output_location_info(location_info, "location_info.data")
    os.system("../common/tools/sanitize_location_info")
    output_aggregates(master_data, location_info, "aggregate.json")
    # Add any new daily file.
    os.system("git add d/*.json")
if __name__ == "__main__":
    # Helper modules live in the sibling 'common' repo, so they can only be
    # imported after the path has been extended to include it.
    if check_for_common_repo():
        sys.path.insert(0, "../common/tools")
        import country_converter
        import geo_util
        import location_info_extractor
        geo_util.clean()
        update()
| [
"m@ma.nu"
] | m@ma.nu | |
4cf9d38b89dd6ddc114606327319518c90c9cd20 | 69d2627942a554d6914ba05de097a290fed66bad | /vb2py/vb/test1/test/frmRadio.py | 3288d55a11d256403ea27f0e3c916d0c08fe55bc | [
"BSD-3-Clause"
] | permissive | rayzamgh/sumurProjection | 0fcef39cc75e620057b012f1bd35cae1c49a5554 | 847ce71e85093ea5ee668ec61dbfba760ffa6bbd | refs/heads/master | 2020-07-23T23:33:26.621550 | 2019-12-22T05:31:24 | 2019-12-22T05:31:24 | 207,738,494 | 1 | 0 | null | 2019-10-28T16:00:07 | 2019-09-11T06:23:43 | Python | UTF-8 | Python | false | false | 1,383 | py | """The main form for the application"""
from PythonCard import model
# Allow importing of our custom controls
import PythonCard.resource
PythonCard.resource.APP_COMPONENTS_PACKAGE = "vb2py.targets.pythoncard.vbcontrols"
class Background(model.Background):
    """PythonCard form base that emulates VB form behaviour for vb2py output."""
    def __getattr__(self, name):
        """If a name was not found then look for it in components"""
        # Lets VB-style 'self.ControlName' access reach the PythonCard
        # components collection transparently.
        return getattr(self.components, name)
    def __init__(self, *args, **kw):
        """Initialize the form"""
        model.Background.__init__(self, *args, **kw)
        # Call the VB Form_Load
        # TODO: This is brittle - depends on how the private indicator is set
        # (Python name mangling turns a private handler defined on MAINFORM
        # into _MAINFORM__Form_Load).
        if hasattr(self, "_MAINFORM__Form_Load"):
            self._MAINFORM__Form_Load()
        elif hasattr(self, "Form_Load"):
            self.Form_Load()
from vb2py.vbfunctions import *
from vb2py.vbdebug import *
class MAINFORM(Background):
""" This form has radio buttons on it ... these are tougher than they look"""
# VB2PY (UntranslatedCode) Attribute VB_Name = "frmRadio"
# VB2PY (UntranslatedCode) Attribute VB_GlobalNameSpace = False
# VB2PY (UntranslatedCode) Attribute VB_Creatable = False
# VB2PY (UntranslatedCode) Attribute VB_PredeclaredId = True
# VB2PY (UntranslatedCode) Attribute VB_Exposed = False
if __name__ == '__main__':
app = model.Application(MAINFORM)
app.MainLoop()
| [
"rayzaganteng@gmail.com"
] | rayzaganteng@gmail.com |
93ba0b7db738acfb04bf04f9ced0811e84a2f9bb | f7550c4964dc8f3c59dbcebe39e947bd6a264dba | /3.Time Complexity Analysis/array Union.py | 21627529d754f1dc4ea509811fa13e3f3a46dc3c | [] | no_license | Jashwanth-k/Data-Structures-and-Algorithms | db5e2e30932e0a35db578c19ae6cff9f147b7c3d | 1ebf9986999a474cb094f3ab04616a46f2887043 | refs/heads/main | 2023-08-25T02:57:17.394322 | 2021-10-11T15:27:56 | 2021-10-11T15:27:56 | 402,448,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 619 | py | def intersection(a1,a2):
i = j = 0
a3 = []
while i < len(a1) and j < len(a2):
if a1[i] < a2[j]:
if a1[i] not in a3:
a3.append(a1[i])
i+=1
elif a2[j] < a1[i]:
if a2[j] not in a3:
a3.append(a2[j])
j+=1
else:
if a2[j] not in a3:
a3.append(a2[j])
j+=1
while j < len(a2):
if a2[j] not in a3:
a3.append(a2[j])
j+=1
i+=1
print(a3)
arr1 = [1,2,2,4, 5,5,5,5, 6]
arr2 = [7,8,9,9]
intersection(arr1,arr2) | [
"noreply@github.com"
] | Jashwanth-k.noreply@github.com |
08d1ee573cccf0a07e66a575feeddc119ccfc64a | 0f1084acef945809693bdf975a735259e04ccda1 | /api/v1/utils.py | f0817e5514b4296c880c2650093c1c4e836096c8 | [
"Apache-2.0"
] | permissive | buyaoyongroot/1yancc | 5a6331b451178d7ed52bc906ce3ba55e44014415 | 7fffd389aa53b3b785ae99816b4cf76fae4f7779 | refs/heads/main | 2023-05-02T14:07:10.937875 | 2021-05-26T05:30:05 | 2021-05-26T05:30:05 | 370,237,985 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,729 | py | class utils():
#===============加密类=========================================
@staticmethod
def md5(str):
import hashlib
hl = hashlib.md5()
hl.update(str.encode(encoding='utf-8'))
return hl.hexdigest()
#===============随机类=========================================
@staticmethod
def _wc(list, weight):
import random
new_list = []
for i, val in enumerate(list):
for i in range(weight[i]):
new_list.append(val)
return random.choice(new_list)
@staticmethod
def rs(cc):
import random
return ''.join(random.sample('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789', int(cc)))
@staticmethod
def hash():
import time,random
time12 = int(time.time()*1000)
rand04 = random.randint(1000,9999)
return utils.md5(str(time12)+str(rand04))
#===============时间类=========================================
@staticmethod
def time():
import time
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
@staticmethod
def bool(arg):
arg=arg.lower()
if arg=='true':
return True
return False
#===============初始化类=========================================
@staticmethod
def init(fp):
import os,sys
ext=os.path.splitext(sys.argv[0])[1]
if ext=='.py':path=os.path.dirname(os.path.realpath(fp))
if ext=='.exe':path=os.path.dirname(os.path.realpath(sys.argv[0]))
path=path.replace('\\','/')+'/'
os.chdir(path)#修改工作目录
if __name__ == '__main__':
print(utils.time()) | [
"you@example.com"
] | you@example.com |
944c68a1a9440f836c823ee608689293920fa2e2 | 1dae87abcaf49f1d995d03c0ce49fbb3b983d74a | /programs/subroutines/Picture - Frame EoS optical.sub.py | 465057ffcc9faa79f61d88da52baebb028bd91cf | [] | no_license | BEC-Trento/BEC1-data | 651cd8e5f15a7d9848f9921b352e0830c08f27dd | f849086891bc68ecf7447f62962f791496d01858 | refs/heads/master | 2023-03-10T19:19:54.833567 | 2023-03-03T22:59:01 | 2023-03-03T22:59:01 | 132,161,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,825 | py | prg_comment = ""
prg_version = "0.5.1"
def program(prg, cmd):
prg.add(-4403000, "Na Repumper1 (+) Amp", 1.000000)
prg.add(-4393000, "K probe Repumper (+) Amp", 1.000000)
prg.add(-4383000, "K Repumper 1p (+) Amp", 1.000000)
prg.add(-4363000, "Na Dark Spot Amp", 1.000000)
prg.add(-4353000, "Na Repumper MOT Amp", 1.000000)
prg.add(-3033000, "Shutter Probe K Open")
prg.add(-3023000, "Shutter RepumperMOT K Open")
prg.add(-3013000, "Shutter repump Na Open")
prg.add(-2493000, "K probe Cooler (-) Amp", 1.000000)
prg.add(-2030000, "Na 3D MOT cool (-) Amp", 1.000000)
prg.add(-2020000, "Na 3D MOT cool (+) Amp", 1.000000)
prg.add(-2000000, "Shutter 3DMOT cool Na Open")
prg.add(-5400, "K probe Cooler (-) freq", 99.500000)
prg.add(-5000, "K Cooler 2p (+) freq", 97.500000)
prg.add(-4600, "K Repumper 1p (+) Amp", 1000.000000)
prg.add(-4200, "K Repumper 1p (+) freq", 115.000000)
prg.add(-3800, "K Repumper 2p (+) freq", 96.000000)
prg.add(-2500, "Na Repumper MOT Amp", 1000.000000)
prg.add(-2000, "Na Repumper1 (+) Amp", 1000.000000)
prg.add(-1600, "Na Repumper Tune (+) freq", 1713.000000)
prg.add(-500, "Trig ON Stingray 1")
prg.add(-400, "Na Probe/Push (+) freq", 110.000000)
prg.add(0, "Na Probe/Push (-) freq", 110.000000)
prg.add(1000, "Na Probe/Push (-) freq", 150.000000)
prg.add(1500, "Na Probe/Push (+) freq", 150.000000)
prg.add(2000, "Trig OFF Stingray 1")
prg.add(250000, "Shutter Probe Na Close")
prg.add(260000, "Shutter Probe K Close")
prg.add(1010000, "Na Repumper MOT Amp", 1.000000)
prg.add(1020000, "Na Repumper1 (+) Amp", 1.000000)
prg.add(1030000, "K Repumper 1p (+) Amp", 1.000000)
prg.add(4000000, "B comp y", 0.000000)
prg.add(4010000, "IGBT B comp y OFF")
return prg
| [
"carmelo.mordini@unitn.it"
] | carmelo.mordini@unitn.it |
c1e9e59c5aff9b31e0a98e566fa60528b8128e5d | f460b2b8aadf8a6b0f7df9386132b44ab9d633ff | /backend/testapp_24367/settings.py | 5f9aff5eb501ffbf83db70b9c12c5cd3dfbdd311 | [] | no_license | crowdbotics-apps/testapp-24367 | 083bf7033b43ef38bfdb2b9bf0eb104551081e54 | 256d7cebe3bd59ccf26bf22175ad484033ab7edd | refs/heads/master | 2023-02-28T01:00:45.357057 | 2021-02-08T10:09:35 | 2021-02-08T10:09:35 | 337,032,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,064 | py | """
Django settings for testapp_24367 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.sites",
"chat",
"chat_user_profile",
]
LOCAL_APPS = [
"home",
"modules",
"users.apps.UsersConfig",
]
THIRD_PARTY_APPS = [
"rest_framework",
"rest_framework.authtoken",
"rest_auth",
"rest_auth.registration",
"bootstrap4",
"allauth",
"allauth.account",
"allauth.socialaccount",
"allauth.socialaccount.providers.google",
"django_extensions",
"drf_yasg",
"storages",
# start fcm_django push notifications
"fcm_django",
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "testapp_24367.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "testapp_24367.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {"default": env.db()}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = "/static/"
MIDDLEWARE += ["whitenoise.middleware.WhiteNoiseMiddleware"]
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID
and AWS_SECRET_ACCESS_KEY
and AWS_STORAGE_BUCKET_NAME
and AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = "/mediafiles/"
MEDIA_ROOT = os.path.join(BASE_DIR, "mediafiles")
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning(
"You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails."
)
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
dfe6effc5c5ab2dbea9b908ad60d54602c70d73a | af5e5399d86e276528898c4437a6bf909bfae48b | /modoboa_postfix_autoreply/migrations/0007_auto_20180928_1423.py | 6966118338358fa372c8e9538a30ff7540c42178 | [
"MIT"
] | permissive | modoboa/modoboa-postfix-autoreply | b63445dafc3555952ccf440c98059adc8203f6d5 | 675cff4673164cadfa70892a7184f51e4bc5b648 | refs/heads/master | 2023-09-01T14:41:29.232985 | 2023-08-29T15:59:34 | 2023-08-29T15:59:34 | 30,645,239 | 6 | 11 | MIT | 2023-08-29T15:59:35 | 2015-02-11T11:45:49 | Python | UTF-8 | Python | false | false | 921 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-09-28 12:23
from __future__ import unicode_literals
from django.db import migrations
def move_transport_entries(apps, schema_editor):
"""Move old transport entries to new model."""
pf_Transport = apps.get_model("modoboa_postfix_autoreply", "Transport")
Transport = apps.get_model("transport", "Transport")
to_create = []
for old_transport in pf_Transport.objects.all():
to_create.append(Transport(
pattern=old_transport.domain, service="autoreply"))
Transport.objects.bulk_create(to_create)
def backward(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('transport', '0002_auto_20180928_1520'),
('modoboa_postfix_autoreply', '0006_auto_20160329_1501'),
]
operations = [
migrations.RunPython(move_transport_entries, backward),
]
| [
"tonio@ngyn.org"
] | tonio@ngyn.org |
1f3bbac74e70f96f7b9ec40d372d31e1b1f773b0 | fae70ce7b3a6aa11f568ea11f6432a15c303ff4c | /backend/home/admin.py | df00658e4315496f555f3883ec935e268f6c0eb7 | [] | no_license | crowdbotics-apps/msm-mobile-041298-d-16278 | 900532950c8bc78b9559d7f7b7369965be3dfd5a | d0cce051eca656658f757b52028c495de8d14df3 | refs/heads/master | 2023-01-23T10:33:49.516108 | 2020-12-04T09:25:15 | 2020-12-04T09:25:15 | 318,404,253 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 159 | py | from django.contrib import admin
from .models import Payment, Student
admin.site.register(Student)
admin.site.register(Payment)
# Register your models here.
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
6f37a52fa44b06d9339604895887644e5393908f | f0da5036820e92157a9108b4b6793e757a81861c | /tfmodels/generative/encoder_basemodel.py | 94b65286c61a3583c99508f199b4249200bffe17 | [
"MIT"
] | permissive | BioImageInformatics/tfmodels | cb1e136407f0f148194210b1449b26c126fe5a07 | 7219eac59ba82cfa28e6af5e17f313dcc5ddd65e | refs/heads/master | 2022-01-26T16:09:32.630262 | 2019-04-25T05:09:33 | 2019-04-25T05:09:33 | 115,466,269 | 4 | 3 | null | 2018-02-06T17:46:17 | 2017-12-27T00:55:40 | Python | UTF-8 | Python | false | false | 2,638 | py | from __future__ import print_function
import tensorflow as tf
from ..utilities.basemodel import BaseModel
class BaseEncoder(BaseModel):
## Overload the base class.. do I even need the base class?
## TODO expose number of kernels and number of upsample steps to the world
discriminator_defaults = {
'enc_kernels': [32, 64, 128],
'name': 'encoder',
'z_dim': 32,
}
def __init__(self, **kwargs):
self.discriminator_defaults.update(**kwargs)
super(BaseEncoder, self).__init__(**self.discriminator_defaults)
self.nonlin = tf.nn.selu
""" return q(z|x) """
def model(self, x_in, keep_prob=0.5, reuse=False):
raise Exception(NotImplementedError)
## TODO switch to Wasserstein loss. Remember to clip the outputs
## Just put this into the model def since so many things are going to change
## Can't put these into the __init__ method because we have to have the
## model defined, and we could also change the loss function later.
## these are defaults for now
# def make_loss(self, p_real_fake, p_real_real):
# real_target = tf.ones_like(p_real_real)
# fake_target = tf.zeros_like(p_real_fake)
#
# if self.soften_labels:
# real_epsilon = tf.random_normal(shape=tf.shape(real_target),
# mean=0.0, stddev=self.soften_sddev)
# fake_epsilon = tf.random_normal(shape=tf.shape(fake_target),
# mean=0.0, stddev=self.soften_sddev)
# real_target = real_target + real_epsilon
# fake_target = fake_target + fake_epsilon
#
# loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
# labels=real_target, logits=p_real_real))
# loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
# labels=fake_target, logits=p_real_fake))
# # return (loss_real + loss_fake) / 2.0
# return loss_real + loss_fake
#
# def make_training_op(self, p_real_fake, p_real_real):
# self.var_list = self.get_update_list()
# self.optimizer = tf.train.AdamOptimizer(self.learning_rate,
# name='{}_Adam'.format(self.name))
#
# self.loss = self.make_loss(p_real_fake, p_real_real)
# self.train_op = self.optimizer.minimize(self.loss,
# var_list=self.var_list)
# self.training_op_list.append(self.train_op)
#
# # Summary
# self.disciminator_loss_sum = tf.summary.scalar('{}_loss'.format(self.name),
# self.loss)
# self.summary_op_list.append(self.disciminator_loss_sum)
| [
"ing.nathany@gmail.com"
] | ing.nathany@gmail.com |
be7196960f3976f8925c1fb4d15ab6dec089b8be | 21e5825959a886787a3915ff0d3efa86d9cd3702 | /combat/finishers/crushskull.py | 90999f5e7d95fb019500f25b51ba1105f832984b | [
"MIT"
] | permissive | ChrisLR/Python-Roguelike-Template | e0df37752907377e606197f2469fda61202129d5 | 9b63742b0111c7e9456fb98a96a3cd28d41a1e10 | refs/heads/master | 2021-06-26T07:48:39.215338 | 2017-09-14T21:46:08 | 2017-09-14T21:46:08 | 69,761,175 | 0 | 0 | null | 2017-09-14T21:46:09 | 2016-10-01T20:09:24 | Python | UTF-8 | Python | false | false | 1,967 | py | from combat.enums import DamageType
from combat.finishers.base import Finisher
from echo import functions
from util import gridhelpers
class CrushSkull(Finisher):
name = "Crush Skull"
description = "Crush the skull of your enemy."
attacker_message = "You swing your {attacker_weapon} into a powerful overhead swing" \
" CRUSHING {defender_his} head like an overripe melon!"
observer_message = "{attacker} swings {attacker_his} {attacker_weapon} into a powerful " \
"overhead swing CRUSHING {defender_his} head like an overripe melon!" \
@classmethod
def evaluate(cls, attack_result):
if attack_result.context.distance_to <= 1:
attacker_weapon = attack_result.context.attacker_weapon
if attacker_weapon and hasattr(attacker_weapon, 'weapon'):
weapon_component = attacker_weapon.weapon
if weapon_component:
if weapon_component.melee_damage_type == DamageType.Blunt:
return True
return False
@classmethod
def execute(cls, attack_result):
return cls.get_message(attack_result)
@classmethod
def get_message(cls, attack_result):
defender = attack_result.defender
if attack_result.context.attacker.is_player:
return cls.attacker_message.format(
attacker_weapon=functions.get_name_or_string(attack_result.context.attacker_weapon),
defender_his=functions.his_her_it(defender),
)
else:
return cls.observer_message.format(
attacker=functions.get_name_or_string(attack_result.context.attacker),
attacker_his=functions.his_her_it(attack_result.context.attacker),
attacker_weapon=functions.get_name_or_string(attack_result.context.attacker_weapon),
defender_his=functions.his_her_it(defender),
)
| [
"arzhul@gmail.com"
] | arzhul@gmail.com |
4266808125fee1529cd17fad7f3dcc1b19a5058d | cc5c546379ca79a7634acb9a2a66ae690f20ee15 | /MyCode-01/Schema/__init__.py | 0fa987e6907d0c9a145c077b36c3792e0904e2ca | [] | no_license | vakili73/CodeV1 | 0c13f8b3cba1fa9b5d40039065931687dd0acc84 | 658826200ddb779baf2a1cc2bcccdc0e43aefd47 | refs/heads/master | 2020-04-01T07:23:58.679094 | 2018-12-22T21:07:24 | 2018-12-22T21:07:24 | 152,988,274 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 365 | py | from .BaseSchema import BaseSchema
from .SchemaV01 import SchemaV01
from .SchemaV02 import SchemaV02
from .SchemaV03 import SchemaV03
from .SchemaV04 import SchemaV04
from .SchemaV05 import SchemaV05
from .SchemaV06 import SchemaV06
from .SchemaV07 import SchemaV07
from .SchemaV08 import SchemaV08
from .SchemaV09 import SchemaV09
from .SchemaV10 import SchemaV10 | [
"v.vakili73@gmail.com"
] | v.vakili73@gmail.com |
24fe5caed2fa41cc86fa0d89688e6ed28366e88a | 411eff94020c192d5e5f657fa6012232ab1d051c | /game/src/coginvasion/holiday/DistributedWinterCoachActivityAI.py | 39d242aa474cc9dd01c703a6449f877ffd5cab35 | [] | no_license | xMakerx/cio-src | 48c9efe7f9a1bbf619a4c95a4198aaace78b8491 | 60b2bdf2c4a24d506101fdab1f51752d0d1861f8 | refs/heads/master | 2023-02-14T03:12:51.042106 | 2021-01-15T14:02:10 | 2021-01-15T14:02:10 | 328,268,776 | 1 | 0 | null | 2021-01-15T15:15:35 | 2021-01-09T23:51:37 | Python | UTF-8 | Python | false | false | 904 | py | """
COG INVASION ONLINE
Copyright (c) CIO Team. All rights reserved.
@file DistributedWinterCoachActivityAI.py
@author Maverick Liberty
@date November 14, 2015
"""
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.distributed.DistributedNodeAI import DistributedNodeAI
class DistributedWinterCoachActivityAI(DistributedNodeAI):
notify = directNotify.newCategory('DistributedWinterCoachActivityAI')
def __init__(self, air):
DistributedNodeAI.__init__(self, air)
def requestEnter(self):
avId = self.air.getAvatarIdFromSender()
avatar = self.air.doId2do.get(avId)
self.sendUpdateToAvatarId(avId, 'enterAccepted', [])
self.sendUpdate('greetAvatar', [avatar.getName()])
def requestExit(self):
avId = self.air.getAvatarIdFromSender()
self.sendUpdateToAvatarId(avId, 'exitAccepted', []) | [
"brianlach72@gmail.com"
] | brianlach72@gmail.com |
d72458e76f86057e04cab0990ba3207a0ce687ce | 9e8d98c48035d4ee61fa930c324c822a61e5ae55 | /examples3/test_surface_ct.py | 53652450ddfa76ea5a3507d290c37f21d095ec74 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | GRSEB9S/mystic | 59ac0c284a19f7b685a98420cd49d21bb10ff0cd | 748e0030c8d7d8b005f2eafa17a4581c2b3ddb47 | refs/heads/master | 2021-08-14T07:11:04.439139 | 2017-11-14T23:49:22 | 2017-11-14T23:49:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,116 | py | #!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 2010-2016 California Institute of Technology.
# Copyright (c) 2016-2017 The Uncertainty Quantification Foundation.
# License: 3-clause BSD. The full license text is available at:
# - https://github.com/uqfoundation/mystic/blob/master/LICENSE
"""
an example of using an interpolator within a surface object
"""
from surface import Surface_Clough as Surface
import time
if __name__ == '__main__':
start = time.time()
"""
from mystic.models import griewangk
from mystic.termination import NormalizedChangeOverGeneration as NCOG
stop = NCOG(1e-4)
bounds = 2*[(-9.5,9.5)]
self = Surface(griewangk, maxpts=1000)
# self.doit(bounds, stop)
step=100; scale=False; shift=False; density=9; kwds={}
if not self.sampler.traj: self.sampler.UseTrajectories()
# get trajectories
self.Sample(bounds, stop)
# get interpolated function
self.Interpolate(**kwds)
# check extrema #XXX: put _min,_max in Interpolate? (downsampled)
f = lambda x,z: (z,self.surrogate(*x))
print("min: {}; min@f: {}".format(*f(*self._min())))
print("max: {}; max@f: {}".format(*f(*self._max())))
# plot surface
self.Plot(step, scale, shift, density)
"""
# parallel configuration
try:
from pathos.helpers import freeze_support
freeze_support()
from pathos.pools import ProcessPool as Pool
#from pathos.pools import ThreadPool as Pool
#from pathos.pools import ParallelPool as Pool
except ImportError:
from mystic.pools import SerialPool as Pool
_map = Pool().map
# tools
from mystic.termination import VTR, ChangeOverGeneration as COG
from mystic.termination import NormalizedChangeOverGeneration as NCOG
from mystic.monitors import LoggingMonitor, VerboseMonitor, Monitor
from klepto.archives import dir_archive
stop = NCOG(1e-4)
disp = False # print optimization summary
stepmon = False # use LoggingMonitor
archive = False # save an archive
traj = not stepmon # save all trajectories internally, if no logs
# cost function
from mystic.models import griewangk as model
ndim = 2 # model dimensionality
bounds = ndim * [(-9.5,9.5)] # griewangk
# the ensemble solvers
from mystic.solvers import BuckshotSolver, LatticeSolver
# the local solvers
from mystic.solvers import PowellDirectionalSolver
sprayer = BuckshotSolver
seeker = PowellDirectionalSolver
npts = 25 # number of solvers
retry = 1 # max consectutive iteration retries without a cache 'miss'
tol = 8 # rounding precision
mem = 1 # cache rounding precision
#CUTE: 'configure' monitor and archive if they are desired
if stepmon:
stepmon = LoggingMonitor(1) # montor for all runs
itermon = LoggingMonitor(1, filename='inv.txt') #XXX: log.txt?
else:
stepmon = itermon = None
if archive: #python2.5
ar_name = '__%s_%sD_cache__' % (model.__self__.__class__.__name__,ndim)
archive = dir_archive(ar_name, serialized=True, cached=False)
ar_name = '__%s_%sD_invcache__' % (model.__self__.__class__.__name__,ndim)
ivcache = dir_archive(ar_name, serialized=True, cached=False)
else:
archive = ivcache = None
from mystic.search import Searcher #XXX: init w/ archive, then UseArchive?
sampler = Searcher(npts, retry, tol, mem, _map, archive, sprayer, seeker)
sampler.Verbose(disp)
sampler.UseTrajectories(traj)
### doit ###
maxpts = 1000. #10000.
surface = Surface(model, sampler, maxpts=maxpts, dim=ndim)
surface.UseMonitor(stepmon, itermon)
surface.UseArchive(archive, ivcache)
density = 9
shift = 0
scale = 0
step = 200
args = {
#'fill_value': 1.0,
}
#surface.doit(bounds, stop, step=step)
#############
# get trajectories
surface.Sample(bounds, stop)
print("TOOK: %s" % (time.time() - start))
# exit()
# get interpolated function
surface.Interpolate(**args)
# check extrema #XXX: put _min,_max in Interpolate? (downsampled)
f = lambda x,z: (z,surface.surrogate(*x))
print("min: {}; min@f: {}".format(*f(*surface._min())))
print("max: {}; max@f: {}".format(*f(*surface._max())))
# print("TOOK: %s" % (time.time() - start))
# plot surface
axes = (0,1)
vals = () # use remaining minima as the fixed values
surface.Plot(step, scale, shift, density, axes, vals)
"""
try:
from klepto.archives import file_archive
archive = file_archive('models.pkl', serialized=True, cached=False)
archive[model.im_class.__name__.lower()] = surface.surrogate
except Exception:
print("serialization failed")
"""
# some testing of interpolated model
import numpy as np
actual = np.asarray(surface.z) # downsample?
interp = surface.surrogate(*surface.x.T) # downsample?
print("sum diff squares")
print("actual and interp: %s" % np.sum((actual - interp)**2))
# EOF
| [
"mmckerns@968178ea-60bd-409e-af13-df8a517b6005"
] | mmckerns@968178ea-60bd-409e-af13-df8a517b6005 |
a1409738902176bfc7b30eefe27cd0406cc72281 | 62e58c051128baef9452e7e0eb0b5a83367add26 | /x12/6040/265006040.py | 8d9319d572368cb051ac2c665bd006e19f823ef1 | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 2,266 | py | from bots.botsconfig import *
from records006040 import recorddefs
syntax = {
'version': '00604',
'functionalgroup': 'TO',
}
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BGN', MIN: 1, MAX: 1},
{ID: 'N1', MIN: 1, MAX: 5, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 2},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'REF', MIN: 0, MAX: 12},
{ID: 'PER', MIN: 0, MAX: 3},
]},
{ID: 'LX', MIN: 1, MAX: 99999, LEVEL: [
{ID: 'REF', MIN: 1, MAX: 12},
{ID: 'PDS', MIN: 1, MAX: 20},
{ID: 'PDE', MIN: 0, MAX: 99999},
{ID: 'NX1', MIN: 0, MAX: 1},
{ID: 'NX2', MIN: 0, MAX: 30},
{ID: 'PRD', MIN: 0, MAX: 1},
{ID: 'LRQ', MIN: 0, MAX: 1},
{ID: 'LN1', MIN: 0, MAX: 1},
{ID: 'MSG', MIN: 0, MAX: 100},
{ID: 'IN1', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'IN2', MIN: 0, MAX: 30},
{ID: 'DMG', MIN: 0, MAX: 1},
{ID: 'FPT', MIN: 0, MAX: 1},
{ID: 'N4', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'PER', MIN: 0, MAX: 4},
]},
]},
{ID: 'MCD', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'AMT', MIN: 0, MAX: 50},
]},
{ID: 'N1', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 2},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'REF', MIN: 0, MAX: 12},
{ID: 'PER', MIN: 0, MAX: 3},
{ID: 'AMT', MIN: 0, MAX: 2},
]},
{ID: 'TIS', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'AMT', MIN: 0, MAX: 30},
]},
{ID: 'PWK', MIN: 0, MAX: 5, LEVEL: [
{ID: 'N1', MIN: 0, MAX: 1},
{ID: 'N2', MIN: 0, MAX: 2},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'REF', MIN: 0, MAX: 12},
{ID: 'PER', MIN: 0, MAX: 3},
]},
]},
{ID: 'LS', MIN: 0, MAX: 1, LEVEL: [
{ID: 'TIS', MIN: 1, MAX: 99999, LEVEL: [
{ID: 'AMT', MIN: 0, MAX: 30},
{ID: 'MSG', MIN: 0, MAX: 100},
]},
{ID: 'LE', MIN: 1, MAX: 1},
]},
{ID: 'SE', MIN: 1, MAX: 1},
]}
]
| [
"doug.vanhorn@tagglogistics.com"
] | doug.vanhorn@tagglogistics.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.