blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8f0c91c576598b507ff7652c740be6f6b6462ec9 | 65d93b3db37f488356faa1789f1001f17191e345 | /isi_mip/climatemodels/migrations/0102_auto_20190408_1308.py | 9581823e6bd88e440f166ad4dfe4f5df2ee91833 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ISI-MIP/isimip | b4a19310dd772356eef87259783084836107cf4a | c2a78c727337e38f3695031e00afd607da7d6dcb | refs/heads/master | 2021-09-14T15:42:14.453031 | 2021-05-25T09:33:45 | 2021-05-25T09:33:45 | 237,446,232 | 0 | 0 | MIT | 2020-01-31T14:27:04 | 2020-01-31T14:27:03 | null | UTF-8 | Python | false | false | 914 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-04-08 11:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('climatemodels', '0101_impactmodel_simulation_round_specific_description'),
]
operations = [
migrations.RemoveField(
model_name='impactmodel',
name='model_output_license',
),
migrations.AlterField(
model_name='baseimpactmodel',
name='short_description',
field=models.TextField(blank=True, default='', help_text='This short description should assist other researchers in getting an understanding of your model, including the main differences between model versions used for different ISIMIP simulation rounds.', null=True, verbose_name='Short model description (all rounds)'),
),
]
| [
"hi@brueck.io"
] | hi@brueck.io |
f56291670cf5bac55339f2575f4ddc76147d4946 | dee7bbdddeae675f27bce0c9b79d972026bf388b | /Django/p5_connection_project/p5_connection_project/wsgi.py | 1332f5638464991446970a47345707c8c06d7043 | [] | no_license | KevinMichaelCamp/CodingDojo-Python | 529815dcef102ef4b9bbb18d0ec572172de5d2c4 | 30674b52423e0724908b3ab9930d3d80c807581d | refs/heads/master | 2022-12-10T13:20:22.723313 | 2021-02-18T02:22:00 | 2021-02-18T02:22:00 | 151,661,213 | 0 | 1 | null | 2022-12-08T03:03:05 | 2018-10-05T02:24:57 | Python | UTF-8 | Python | false | false | 419 | py | """
WSGI config for p5_connection_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'p5_connection_project.settings')
application = get_wsgi_application()
| [
"kevinmichaelcamp@gmail.com"
] | kevinmichaelcamp@gmail.com |
28bc2cd20ad54c1510a2240542dcab62c23db357 | 8997a0bf1e3b6efe5dd9d5f307e1459f15501f5a | /Decorators__examples/decorator__args_as_funcs.py | 93655d075ac3c19fdba95b7b26163d49ff67f6ef | [
"CC-BY-4.0"
] | permissive | stepik/SimplePyScripts | 01092eb1b2c1c33756427abb2debbd0c0abf533f | 3259d88cb58b650549080d6f63b15910ae7e4779 | refs/heads/master | 2023-05-15T17:35:55.743164 | 2021-06-11T22:59:07 | 2021-06-11T22:59:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,041 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
class TextBuilder:
def __init__(self):
self.result = []
# Функция, принимающая аргументы и возвращающая декоратор
def _call_before(*funcs):
# Сам декоратор
def decorator(func):
# Функция-обертка, заменит собой декорируемую
def wrapper(self, *args, **kwargs):
for f in funcs:
f(self)
return func(self, *args, **kwargs)
# Декоратор возвращает обертку
return wrapper
# Возаращаем сам декоратор
return decorator
# Функция, принимающая аргументы и возвращающая декоратор
def _call_after(*funcs):
# Сам декоратор
def decorator(func):
# Функция-обертка, заменит собой декорируемую
def wrapper(self, *args, **kwargs):
result = func(self, *args, **kwargs)
for f in funcs:
f(self)
return result
# Декоратор возвращает обертку
return wrapper
# Возаращаем сам декоратор
return decorator
@_call_before(lambda self: self.result.append('+' + '-' * 10 + '+'))
@_call_after(lambda self: self.result.append('+' + '-' * 10 + '+'), lambda self: self.result.append('\n'))
def append(self, text: str) -> 'TextBuilder':
self.result.append(text)
return self
def build(self):
return '\n'.join(self.result)
builder = TextBuilder()
builder.append("Foo").append("Bar").append("Hello World!")
print(builder.build())
# +----------+
# Foo
# +----------+
#
#
# +----------+
# Bar
# +----------+
#
#
# +----------+
# Hello World!
# +----------+
| [
"ilya.petrash@inbox.ru"
] | ilya.petrash@inbox.ru |
a6c80e22b97fbe9dfcdc635ef03d12eea5b06884 | 9f951479d5eda96e7fecbbbd0b3b7e4f5e83360d | /webtest/全栈课程代码学员版/Level3课程代码/Level3Code/lesson8/LessonCode/OriginalVersion/tenmins/urls.py | c0720b4831408ae6c32907c61dc3b51f8fa8d8b8 | [] | no_license | lianbo2006/Project | 44c5b6fcab4fe31b80bfff467b3e0e31fd2da8ba | 5d13923817a1d4cffe7d4abbb5873277ce28bb87 | refs/heads/master | 2021-01-11T18:24:47.597849 | 2017-04-25T03:44:47 | 2017-04-25T03:44:47 | 79,539,300 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,804 | py | """tenmins URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from website.views import listing, index_login, index_register, detail, detail_vote
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.auth.views import logout
from website.mobile_views import video_list
from website.api import video
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^list/$', listing, name='list'),
url(r'^list/(?P<cate>[A-Za-z]+)$', listing, name='list'),
url(r'^detail/(?P<id>\d+)$', detail, name='detail'),
url(r'^detail/vote/(?P<id>\d+)$', detail_vote, name='vote'),
url(r'^login/$', index_login, name='login'),
url(r'^register/$', index_register, name='register'),
url(r'^logout/$', logout, {'next_page': '/register'}, name='logout'),
url(r'^api/video/', video),
url(r'^m/videolist/', video_list),
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"513748889@qq.com"
] | 513748889@qq.com |
7f3d9b3fc7a7807540e8318fc33426af29965e20 | cb1fb3bf87b3f7006b564a0f2acd2d68e5d5ffaa | /pyram/tree/treemodule.py | e1ed25bba91eb8e5f5f36a320ce2c54625637cd1 | [
"MIT"
] | permissive | Hoseung/pyRamAn | 2778f8b12ca966e7586ebf077a964aecd1654223 | f9386fa5a9f045f98590039988d3cd50bc488dc2 | refs/heads/master | 2021-06-22T18:35:06.478492 | 2021-06-05T03:26:31 | 2021-06-05T03:26:31 | 227,741,934 | 1 | 1 | MIT | 2020-03-04T12:39:06 | 2019-12-13T02:49:30 | Jupyter Notebook | UTF-8 | Python | false | false | 7,126 | py | # -*- coding: utf-8 -*-
"""
treemodule
Created on Sun Jun 14 06:35:45 2015
@author: hoseung
"""
import numpy as np
def _is_ascii(filename):
return filename.split(".")[-1] == "dat"
class CTree(object):
"""
compatible with ConsistentTrees 1.01
"""
def __init__(self, filename=None):
if filename is not None:
self.load(filename=filename)
def _add_info(self):
#self.pboxsize = 199.632011
self.pboxsize = 200.0
def _load_ascii(self, filename):
cnt_header = 0
datatype =[ 'f8','i8','i8','i8','i8','i8','i8','i8','i8','f8'\
,'f8','f8','i8','f8','f8','f8','i8','f8','f8','f8'\
,'f8','f8','f8','f8','f8','f8','f8','i8','i8','i8'\
,'i8','i8','i8','i8','i8','f8','i8']#,\
with open(filename, 'rb') as f:
for i in range(180):
line = f.readline()
line = line.decode('utf-8')
if line[0] != '#':
self.ntrees = int(line) # The first line after the header is the number of trees.
cnt_header = f.tell()
break
f.seek(cnt_header)
self.data = np.genfromtxt(f,dtype=datatype)
self.data.dtype.names=(\
'aexp','id','desc_aexp','desc_id','nprog','pid','upid','desc_pid','phantom','sam_mvir'\
,'mvir','rvir','rs','vrms','mmp','aexp_last_MM','vmax','x','y','z'\
,'vx','vy','vz','jx','jy','jz','spin','b_id','d_id','tree_root_id'\
,'Orig_halo_id','nout','next_coprogenitor_d_id','last_progenitor_d_id'\
,'last_mainleaf_depthfirst_id', 'tidal_force', 'tidal_id')
print("Loading Consistent Tree data from ASCII is done")
def _load_pickle(self, filename):
import pickle
try:
with open(filename, "rb") as f:
self.data = pickle.load(f)
print("Loading Consistent Tree data from ASCII is done")
except IOError:
print("Error, No such file.", filename)
def load(self, filename=None):
if filename is None:
# from tkinter import tk
# tk().withdraw() # we don't want a full GUI, so keep the root window from appearing
from tkinter.filedialog import askopenfilename
filename = askopenfilename() # show an "Open" dialog box and return the path to the selected file
if _is_ascii(filename):
self._load_ascii(filename)
else:
self._load_pickle(filename)
# The tree output file is written in 'bytes' string rather than string - this is the modern concept of text.
# So the b' is at the very begining of lines. Python3 now distinguishes between string and byte string.
#
# Python3 strings are unicode by default.
# You need to specify the encoding of the text file.
# Of course you can there are built-in methods to detect the encoding of a text file.
# The output of Consistent tree is 'utf-8'
# Additional complexity is that
# numpy genfromtxt always want it to be byte strings.
# So you need 'rb' rather than 'r'
self._tree_ids()
def _tree_ids(self):
i = np.where(self.data['nout'] == max(self.data['nout']))[0]
self.trees_idx = self.data['id'][i]
self.trees_id = self.data['Orig_halo_id'][i]
""" get_main_prg in treeutils is a working version.
Use that instead.
def get_main_prg(trees, haloid=None, haloind=None, unique_id=True):
def get_main_prg(self, ids=None, original=True):
if ids is None:
if original:
ids = self.trees_id
else:
ids = self.trees_idx
if type(ids) is not list: ids = [ ids ]
for thisid in ids:
tree = self.data[np.where(self.data['tree_root_id'] == thisid)]
prgs=[]
next_h = tree[0]['id']
while next_h != -1:
print(next_h)
i_hal = np.where(tree['id'] == next_h)
halo = tree[i_hal]
next_h = halo['last_progenitor_d_id']
prgs.append(next_h)
"""
# Now, what are the main progenitors?
def show_data(self, data, ind):
'''
This prints list of filed names and values of a numpy array in two column format.
Sometimes there are too many fields that listing the names in a row makes it difficult
to match the field name and the field value.
Example)
>>>
'''
for ii,jj in zip(self.data.dtype.names,data[ind]):
print("%s : %f" % (ii,jj))
def load_tree(wdir, is_gal=False, no_dump=False, load_ascii=False):
import pickle
#from tree import treemodule
import tree.ctutils as ctu
from general import defaults
df = defaults.Default()
tree_path = df.tree_path(is_gal=is_gal)
if load_ascii:
alltrees = CTree()
alltrees.load(filename= wdir + tree_path + 'tree_0_0_0.dat')
# Fix nout -----------------------------------------------------
nout_max = alltrees.data['nout'].max()
alltrees.data['nout'] += 187 - nout_max
print("------ NOUT fixed")
alltrees.data = ctu.augment_tree(alltrees.data, wdir, is_gal=is_gal)
print("------ tree data extended")
if not no_dump:
pickle.dump(alltrees, open(wdir + df.ext_tree_pickle(is_gal=is_gal), "wb" ))
else:
try:
alltrees = pickle.load(open(wdir + df.ext_tree_pickle(is_gal=is_gal), "rb" ))
print("Loaded an extended tree")
except:
alltrees = CTree()
alltrees.load(filename= wdir + tree_path + 'tree_0_0_0.dat')
# Fix nout -----------------------------------------------------
nout_max = alltrees.data['nout'].max()
alltrees.data['nout'] += 187 - nout_max
print("------ NOUT fixed")
alltrees.data = ctu.augment_tree(alltrees.data, wdir, is_gal=is_gal)
print("------ tree data extended")
if not no_dump:
pickle.dump(alltrees, open(wdir + df.ext_tree_pickle(is_gal=is_gal), "wb" ))
return alltrees
def rs2codeunit(rst):
"""
Check and clean up.
nout in Consistent Tree by default starts from 0 regardless of
the original simulation snapshot number.
This function assumes the nouts are already fixed.
In practice, it should be fixed when reading from ASCII and pickling it.
"""
import numpy as np
nouts = np.unique(rst['nout'])
for nout in nouts:
dir_halo = base + "rhalo/rockstar_halos/"
f_tree = base + "rhalo/tree.pickle"
with open(f_tree, "rb") as ft:
rstree = pickle.load(ft)
rstree['x'] *= 200/199.632011
rstree['y'] *= 200/199.632011
rstree['z'] *= 200/199.632011
| [
"hopung@gmail.com"
] | hopung@gmail.com |
36f0354909d9d144b680d363b46210367d9d65f2 | 9e549ee54faa8b037f90eac8ecb36f853e460e5e | /quiz.py | 487aec0b5842d9ac80fc5c7efaacddddf7c41cd2 | [
"MIT"
] | permissive | aitoehigie/britecore_flask | e8df68e71dd0eac980a7de8c0f20b5a5a16979fe | eef1873dbe6b2cc21f770bc6dec783007ae4493b | refs/heads/master | 2022-12-09T22:07:45.930238 | 2019-05-15T04:10:37 | 2019-05-15T04:10:37 | 177,354,667 | 0 | 0 | MIT | 2022-12-08T04:54:09 | 2019-03-24T00:38:20 | Python | UTF-8 | Python | false | false | 483 | py | from cryptography.fernet import Fernet
key = 'TluxwB3fV_GWuLkR1_BzGs1Zk90TYAuhNMZP_0q4WyM='
# Oh no! The code is going over the edge! What are you going to do?
message = b'gAAAAABcmZNjB1CqmvCWcO8ByvVTKas3rKh5Py67Al5tOma1TtHeQBQJi55SmpN86uNXNFX7_clMNLWCB5HzqkcOkkz3V7KzxeFo7q4ZNIGsa4tb82l0sVTcS2zCW7-Rk7kcnkLl_Jsw2F98JMpnnLa4ZrlZqPoteBSgjBr7vmk4Z5GXIpAqPU5TBk2K51m2RE47HogruLsO'
def main():
f = Fernet(key)
print(f.decrypt(message))
if __name__ == "__main__":
main()
| [
"aitoehigie@gmail.com"
] | aitoehigie@gmail.com |
0ef156a76ca296a030eba1c782df633bae7c1ba4 | 7bd3c35070d40724ab21e83b4d3f5ba39e455818 | /E2 - Second/eshop/admin.py | 10f96f6813b97567fbb8ce70c6afe590ef33d481 | [] | no_license | shatishdesai202/Django-Project-Practice | 9433004de6fd72dd0cd56cb4ff7770ecded6a054 | f66ee507fcf959d148627c1c2f5d587b10adc996 | refs/heads/main | 2023-03-12T17:14:15.823285 | 2021-03-07T05:32:07 | 2021-03-07T05:32:07 | 345,263,312 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 653 | py | from django.contrib import admin
from .models import Category, Product, Placeorder, Comment
# Register your models here.
@admin.register(Category)
class AdminCategory(admin.ModelAdmin):
list_display = ['id', 'c_name']
@admin.register(Product)
class AdminProduct(admin.ModelAdmin):
list_display = ['id','category', 'p_name', 'price', 'desc', 'timestamp']
@admin.register(Placeorder)
class AdminPlaceorder(admin.ModelAdmin):
list_display = ['id','firstname', 'last_name', 'email', 'address', 'city', 'state', 'pin', 'item', 'qty', 'customer', ]
@admin.register(Comment)
class AdminComment(admin.ModelAdmin):
list_display = ['comment'] | [
"sdondjango@gmail.com"
] | sdondjango@gmail.com |
c520a22448096a9aec2f2b0a15c922d05bc96916 | 77932fb1dde97eafe995b960d62036e2511465d2 | /shoppingsite.py | cab310f0cc96165443d84cfc2f906044fadb1b0b | [] | no_license | lsylk/shopping-site | f2dc859a3f07ca5855246f3573c7e13c1885c3ba | f29020824bec51e71c3ba6197f8420d149a4bd16 | refs/heads/master | 2020-12-24T18:42:20.858411 | 2016-04-22T00:14:44 | 2016-04-22T00:14:44 | 56,802,374 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,814 | py | """Ubermelon shopping application Flask server.
Provides web interface for browsing melons, seeing detail about a melon, and
put melons in a shopping cart.
Authors: Joel Burton, Christian Fernandez, Meggie Mahnken.
"""
from flask import Flask, render_template, redirect, flash, session, request
import jinja2
import melons
app = Flask(__name__)
# Need to use Flask sessioning features
app.secret_key = 'this-should-be-something-unguessable'
# Normally, if you refer to an undefined variable in a Jinja template,
# Jinja silently ignores this. This makes debugging difficult, so we'll
# set an attribute of the Jinja environment that says to make this an
# error.
app.jinja_env.undefined = jinja2.StrictUndefined
@app.route("/")
def index():
"""Return homepage."""
return render_template("homepage.html")
@app.route("/melons")
def list_melons():
"""Return page showing all the melons ubermelon has to offer"""
melon_list = melons.get_all()
return render_template("all_melons.html",
melon_list=melon_list)
@app.route("/melon/<int:melon_id>")
def show_melon(melon_id):
"""Return page showing the details of a given melon.
Show all info about a melon. Also, provide a button to buy that melon.
"""
melon = melons.get_by_id(melon_id)
print melon
return render_template("melon_details.html",
display_melon=melon)
@app.route("/cart")
def shopping_cart():
"""Display content of shopping cart."""
# TODO: Display the contents of the shopping cart.
# The logic here will be something like:
#
# - get the list-of-ids-of-melons from the session cart
# - loop over this list:
# - keep track of information about melon types in the cart
# - keep track of the total amt ordered for a melon-type
# - keep track of the total amt of the entire order
# - hand to the template the total order cost and the list of melon types
for id in session.values():
melon_info = melons.get_by_id(id)
# melon = melons.get_by_id(melon_id)
# raise Exception("Fix me")
return render_template("cart.html")
@app.route("/add_to_cart/<int:id>")
def add_to_cart(id):
"""Add a melon to cart and redirect to shopping cart page.
When a melon is added to the cart, redirect browser to the shopping cart
page and display a confirmation message: 'Successfully added to cart'.
"""
# TODO: Finish shopping cart functionality
# The logic here should be something like:
#
# - add the id of the melon they bought to the cart in the session
session.setdefault("cart", []).append(str(id)) # If cart is not a key in session, then create that key with an empty list value. Everytime we add something new to the cart, append the id to the list.
orders = ", ".join(session["cart"]) # Items in list must be returned back as string.
flash("Successfully added to cart!")
return redirect("/cart")
@app.route("/login", methods=["GET"])
def show_login():
"""Show login form."""
return render_template("login.html")
@app.route("/login", methods=["POST"])
def process_login():
"""Log user into site.
Find the user's login credentials located in the 'request.form'
dictionary, look up the user, and store them in the session.
"""
# TODO: Need to implement this!
return "Oops! This needs to be implemented"
@app.route("/checkout")
def checkout():
"""Checkout customer, process payment, and ship melons."""
# For now, we'll just provide a warning. Completing this is beyond the
# scope of this exercise.
flash("Sorry! Checkout will be implemented in a future version.")
return redirect("/melons")
if __name__ == "__main__":
app.run(debug=True)
| [
"info@hackbrightacademy.com"
] | info@hackbrightacademy.com |
4e93574da1da48b3fd89023b214a51f62f586e7a | b1605b9fcc04adb8d8b095104b1e0cb5d7e39fbf | /scienceApp/views.py | 5a2e37086c344767947a9baee88500bf8ae29eef | [] | no_license | ZLS1803/hengda | 4f022c868590d0b2cd2cc644d9cabd7a7aa4a1f5 | 8f3b801353932528c499a0ae2d40df08fdd44214 | refs/heads/main | 2023-07-19T11:37:40.898445 | 2021-09-12T02:41:18 | 2021-09-12T02:41:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 141 | py | from django.shortcuts import render
# Create your views here.
def science(request):
return render(request, 'scienceApp/science.html')
| [
"email"
] | email |
b05937c4e6edd1ec39ddc284c0ffb6a77f4e3951 | aa52a6a6a9db8ccc35a36d0ec42d06dc8070d1f6 | /migrations/versions/e716104ed757_init.py | 7449cda2864b787936d10bc387c0578b6f83d19f | [
"MIT"
] | permissive | Kel0/aiogram_bot_template | 38a9e64b7010eec61e017442cceecb4adb1020c1 | c5cd2129c99824efe122a951ada27936f3a35b0a | refs/heads/main | 2023-04-12T05:56:58.086964 | 2021-05-19T10:48:03 | 2021-05-19T10:48:03 | 368,808,317 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,111 | py | """init
Revision ID: e716104ed757
Revises:
Create Date: 2021-05-19 15:59:22.801097
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e716104ed757'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('users',
sa.Column('created_at', sa.TIMESTAMP(), nullable=False),
sa.Column('updated_at', sa.TIMESTAMP(), nullable=False),
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('telegram_id', sa.Integer(), nullable=True),
sa.Column('username', sa.String(length=255), nullable=True),
sa.Column('role', sa.String(length=30), nullable=True),
sa.Column('active', sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('telegram_id'),
sa.UniqueConstraint('telegram_id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('users')
# ### end Alembic commands ###
| [
"rozovdima123@gmail.com"
] | rozovdima123@gmail.com |
af898e4d962f53afbb60cce12d3671b58739fb41 | 50ec7dde190bfa4671a1a6db307375efb607b05b | /day04/part2.py | 9c73c10ae2a100e8a9793690047044d10e4324ed | [
"MIT"
] | permissive | mtn/advent15 | 10a07f1bbc27c90405504cfaf0ac82f7731101e4 | b23bcf5761363596336d5361218c52db0b078793 | refs/heads/master | 2021-07-05T12:47:41.302328 | 2019-01-03T03:28:31 | 2019-01-03T03:28:31 | 163,866,552 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py | #!/usr/bin/env python3
import hashlib
inp = "yzbqklnj"
i = 0
while True:
m = hashlib.md5()
m.update(str.encode("{}{}".format(inp, i)))
if m.hexdigest()[:6] == "000000":
print(i)
break
i += 1
| [
"michaeltnoronha@gmail.com"
] | michaeltnoronha@gmail.com |
a1463bc23152746dc7b8f4ce3c9f84b5aad9a31e | 96dcea595e7c16cec07b3f649afd65f3660a0bad | /tests/components/devolo_home_network/test_sensor.py | 230457f56173898a1cd34f6f92a7ead81b8fc354 | [
"Apache-2.0"
] | permissive | home-assistant/core | 3455eac2e9d925c92d30178643b1aaccf3a6484f | 80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743 | refs/heads/dev | 2023-08-31T15:41:06.299469 | 2023-08-31T14:50:53 | 2023-08-31T14:50:53 | 12,888,993 | 35,501 | 20,617 | Apache-2.0 | 2023-09-14T21:50:15 | 2013-09-17T07:29:48 | Python | UTF-8 | Python | false | false | 3,353 | py | """Tests for the devolo Home Network sensors."""
from datetime import timedelta
from unittest.mock import AsyncMock
from devolo_plc_api.exceptions.device import DeviceUnavailable
from freezegun.api import FrozenDateTimeFactory
import pytest
from syrupy.assertion import SnapshotAssertion
from homeassistant.components.devolo_home_network.const import (
LONG_UPDATE_INTERVAL,
SHORT_UPDATE_INTERVAL,
)
from homeassistant.components.sensor import DOMAIN
from homeassistant.const import STATE_UNAVAILABLE
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
from . import configure_integration
from .mock import MockDevice
from tests.common import async_fire_time_changed
@pytest.mark.usefixtures("mock_device")
async def test_sensor_setup(hass: HomeAssistant) -> None:
"""Test default setup of the sensor component."""
entry = configure_integration(hass)
device_name = entry.title.replace(" ", "_").lower()
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
assert hass.states.get(f"{DOMAIN}.{device_name}_connected_wifi_clients") is not None
assert hass.states.get(f"{DOMAIN}.{device_name}_connected_plc_devices") is None
assert hass.states.get(f"{DOMAIN}.{device_name}_neighboring_wifi_networks") is None
await hass.config_entries.async_unload(entry.entry_id)
@pytest.mark.parametrize(
("name", "get_method", "interval"),
[
[
"connected_wifi_clients",
"async_get_wifi_connected_station",
SHORT_UPDATE_INTERVAL,
],
[
"neighboring_wifi_networks",
"async_get_wifi_neighbor_access_points",
LONG_UPDATE_INTERVAL,
],
[
"connected_plc_devices",
"async_get_network_overview",
LONG_UPDATE_INTERVAL,
],
],
)
@pytest.mark.usefixtures("entity_registry_enabled_by_default")
async def test_sensor(
hass: HomeAssistant,
mock_device: MockDevice,
entity_registry: er.EntityRegistry,
freezer: FrozenDateTimeFactory,
snapshot: SnapshotAssertion,
name: str,
get_method: str,
interval: timedelta,
) -> None:
"""Test state change of a sensor device."""
entry = configure_integration(hass)
device_name = entry.title.replace(" ", "_").lower()
state_key = f"{DOMAIN}.{device_name}_{name}"
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
assert hass.states.get(state_key) == snapshot
assert entity_registry.async_get(state_key) == snapshot
# Emulate device failure
setattr(mock_device.device, get_method, AsyncMock(side_effect=DeviceUnavailable))
setattr(mock_device.plcnet, get_method, AsyncMock(side_effect=DeviceUnavailable))
freezer.tick(interval)
async_fire_time_changed(hass)
await hass.async_block_till_done()
state = hass.states.get(state_key)
assert state is not None
assert state.state == STATE_UNAVAILABLE
# Emulate state change
mock_device.reset()
freezer.tick(interval)
async_fire_time_changed(hass)
await hass.async_block_till_done()
state = hass.states.get(state_key)
assert state is not None
assert state.state == "1"
await hass.config_entries.async_unload(entry.entry_id)
| [
"noreply@github.com"
] | home-assistant.noreply@github.com |
8919c999f822111499f7a48ea5970e4e66bdf2d6 | 13cf11440998376d3b52a49f1e4fb8936c360ac4 | /chainer_chemistry/utils/extend.py | bc1fd94d7cbab82a4c46b3f94174cba64feebd46 | [
"MIT"
] | permissive | k-ishiguro/chainer-chemistry | 87e3db724de0e99042d9585cd4bd5fff38169339 | aec33496def16e76bdfbefa508ba01ab9f79a592 | refs/heads/master | 2021-07-06T22:58:20.127907 | 2019-02-04T02:51:34 | 2019-02-04T02:51:34 | 169,345,375 | 1 | 1 | MIT | 2020-07-30T06:04:13 | 2019-02-06T02:27:39 | Python | UTF-8 | Python | false | false | 3,569 | py | from collections import Iterable
from logging import getLogger
import six
from chainer import cuda
def _to_list(a):
if isinstance(a, Iterable):
a = list(a)
else:
a = [a]
return a
def extend_node(node, out_size, axis=-1, value=0):
"""Extend size of `node` array
For now, this function works same with `extend_array` method,
this is just an alias function.
Args:
node (numpy.ndarray): the array whose `axis` to be extended.
first axis is considered as "batch" axis.
out_size (int): target output size for specified `axis`.
axis (int): node feature axis to be extended.
Default is `axis=-1`, which extends only last axis.
value (int or float): value to be filled for extended place.
Returns (numpy.ndarray): extended `node` array, extended place is filled
with `value`
"""
return extend_arrays_to_size(
node, out_size=out_size, axis=axis, value=value)
def extend_adj(adj, out_size, axis=None, value=0):
"""Extend size of `adj` array
For now, this function only differs default `axis` value from
`extend_array` method, this is an alias function.
Args:
adj (numpy.ndarray): the array whose `axis` to be extended.
first axis is considered as "batch" axis.
out_size (int): target output size for specified `axis`.
axis (list or None): node feature axis to be extended. Default is None,
in this case `axis=[-1, -2]` is used to extend last 2 axes.
value (int or float): value to be filled for extended place.
Returns (numpy.ndarray): extended `adj` array, extended place is filled
with `value`
"""
axis = axis or [-1, -2]
return extend_arrays_to_size(
adj, out_size=out_size, axis=axis, value=value)
def extend_arrays_to_size(arrays, out_size, axis=-1, value=0):
"""Extend size of `arrays` array
Args:
arrays (numpy.ndarray): the array whose `axis` to be extended.
first axis is considered as "batch" axis.
out_size (int): target output size for specified `axis`.
axis (int or list): node feature axis to be extended.
value (int or float): value to be filled for extended place.
Returns (numpy.ndarray): extended array, extended place is filled
with `value`
"""
batch_size = len(arrays)
in_shape = _to_list(arrays[0].shape)
out_shape = [batch_size] + in_shape
axis = _to_list(axis)
for ax in axis:
if ax == 0:
logger = getLogger(__name__)
logger.warning('axis 0 detected, but axis=0 is expected to be '
'batch size dimension.')
if out_shape[ax] > out_size:
raise ValueError(
'current size={} is larger than out_size={} at axis={}'
.format(out_shape[ax], out_size, ax))
out_shape[ax] = out_size
return extend_arrays_to_shape(arrays, out_shape, value=value)
def extend_arrays_to_shape(arrays, out_shape, value=0):
# Ref: `_concat_arrays_with_padding` method in chainer convert.py
# https://github.com/chainer/chainer/blob/master/chainer/dataset/convert.py
xp = cuda.get_array_module(arrays[0])
with cuda.get_device_from_array(arrays[0]):
result = xp.full(out_shape, value, dtype=arrays[0].dtype)
for i in six.moves.range(len(arrays)):
src = arrays[i]
slices = tuple(slice(dim) for dim in src.shape)
result[(i,) + slices] = src
return result
| [
"acc1ssnn9terias@gmail.com"
] | acc1ssnn9terias@gmail.com |
fc20c2db8fe61bc1e4f6569081b063d2bf20d899 | 046df94b4f437b2e30b80d24193fcd5380ee7b54 | /practice/8class_and_object_orinented_programming/test_override.py | ed8de443ce1da2e391a7ce408dd459953a793b13 | [] | no_license | LordBao666/MITLecture6.0001_Introduction_To_CS_Programing_In_Python | 570565a3a931269f47fe15fd83527567a24fc134 | e9fca10ad0226c8620ae36d063c2bc49da114ca4 | refs/heads/master | 2023-04-02T10:40:48.564479 | 2021-04-06T15:19:47 | 2021-04-06T15:19:47 | 344,118,089 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 369 | py | """
@Author : Lord_Bao
@Date : 2021/3/15
"""
class MySupClass(object):
def __init__(self):
pass
def method_to_be_implemented(self):
raise NotImplementedError
class Offspring(MySupClass):
def method_to_be_implemented(self):
pass
def special_method(self):
pass
if __name__ == '__main__':
son = Offspring()
| [
"916900021@qq.com"
] | 916900021@qq.com |
4869c36593cd8c01de13e24e36e3cceefb50893a | 350ecc8259bcad075bd376423335bb41cc8a533e | /edit2.cgi | 8313bbb14d29515c7333953d5ce3cee907eeb7e9 | [] | no_license | CodedQuen/python_begin | 39da66ecc4a77b94a5afbbf0900727c8156b85e1 | 1433c319b5d85520c50aee00dd4b6f21a7e6366a | refs/heads/master | 2022-06-10T10:30:28.807874 | 2020-04-25T03:34:03 | 2020-04-25T03:34:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,170 | cgi | #!/usr/bin/python
# CGI page: render the "Compose Message" form, optionally pre-filled as a
# reply to an existing message (``reply_to`` query/form parameter).
#
# Fixes over the original: the script mixed Python-3 ``print(...)`` calls
# with Python-2 ``print`` statements (and ``print ()"""..."""`` was a syntax
# error in any Python version); all output now uses the print function.
print('Content-type: text/html\n')

import cgitb; cgitb.enable()

import psycopg
conn = psycopg.connect('dbname=foo user=bar')
curs = conn.cursor()

import cgi, sys
form = cgi.FieldStorage()
# Id of the message being replied to, or None for a brand-new message.
reply_to = form.getvalue('reply_to')

print("""
<html>
<head>
<title>Compose Message</title>
</head>
<body>
<h1>Compose Message</h1>
<form action='save.cgi' method='POST'>
""")

subject = ''
if reply_to is not None:
    # Carry the parent id through the form and derive a "Re: " subject.
    print('<input type="hidden" name="reply_to" value="%s"/>' % reply_to)
    # Parameterized query: reply_to comes from the request, so it must not
    # be interpolated into the SQL string (injection risk in the original).
    curs.execute('SELECT subject FROM messages WHERE id = %s', (reply_to,))
    subject = curs.fetchone()[0]
    if not subject.startswith('Re: '):
        subject = 'Re: ' + subject

print("""
<b>Subject:</b><br />
<input type='text' size='40' name='subject' value='%s' /><br />
<b>Sender:</b><br />
<input type='text' size='40' name='sender' /><br />
<b>Message:</b><br />
<textarea name='text' cols='40' rows='20'></textarea><br />
<input type='submit' value='Save'/>
</form>
<hr />
<a href='main.cgi'>Back to the main page</a>'
</body>
</html>
""" % subject)
| [
"noreply@github.com"
] | CodedQuen.noreply@github.com |
3ea8e542055d9af415f0df4954230bc1069c1696 | 45c142c3e3dc8d3211a86c77385ecfdd10d28fb9 | /dstore/engine/procedures/mi_GetInformationTypes_Ad_pb2.py | d044b357e309012a064a921161ed1a2bf83e91db | [] | no_license | dstore-io/dstore-sdk-python | 945d64995c8892af18fab26c90117245abec64a4 | 8494d12ac77c3c3cc6dd59026407ef514ad179fc | refs/heads/master | 2020-06-14T13:07:08.181547 | 2017-01-26T11:19:39 | 2017-01-26T11:19:39 | 75,177,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | true | 7,421 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: dstore/engine/procedures/mi_GetInformationTypes_Ad.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from dstore import values_pb2 as dstore_dot_values__pb2
from dstore.engine import engine_pb2 as dstore_dot_engine_dot_engine__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='dstore/engine/procedures/mi_GetInformationTypes_Ad.proto',
package='dstore.engine.mi_GetInformationTypes_Ad',
syntax='proto3',
serialized_pb=_b('\n8dstore/engine/procedures/mi_GetInformationTypes_Ad.proto\x12\'dstore.engine.mi_GetInformationTypes_Ad\x1a\x13\x64store/values.proto\x1a\x1a\x64store/engine/engine.proto\"\x0c\n\nParameters\"\xbc\x02\n\x08Response\x12\x38\n\x10meta_information\x18\x02 \x03(\x0b\x32\x1e.dstore.engine.MetaInformation\x12\'\n\x07message\x18\x03 \x03(\x0b\x32\x16.dstore.engine.Message\x12\x42\n\x03row\x18\x04 \x03(\x0b\x32\x35.dstore.engine.mi_GetInformationTypes_Ad.Response.Row\x1a\x88\x01\n\x03Row\x12\x0f\n\x06row_id\x18\x90N \x01(\x05\x12\x39\n\x13information_type_id\x18\x91N \x01(\x0b\x32\x1b.dstore.values.IntegerValue\x12\x35\n\x10information_type\x18\x92N \x01(\x0b\x32\x1a.dstore.values.StringValueBZ\n\x1bio.dstore.engine.proceduresZ;gosdk.dstore.de/engine/procedures/mi_GetInformationTypes_Adb\x06proto3')
,
dependencies=[dstore_dot_values__pb2.DESCRIPTOR,dstore_dot_engine_dot_engine__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_PARAMETERS = _descriptor.Descriptor(
name='Parameters',
full_name='dstore.engine.mi_GetInformationTypes_Ad.Parameters',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=150,
serialized_end=162,
)
_RESPONSE_ROW = _descriptor.Descriptor(
name='Row',
full_name='dstore.engine.mi_GetInformationTypes_Ad.Response.Row',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='row_id', full_name='dstore.engine.mi_GetInformationTypes_Ad.Response.Row.row_id', index=0,
number=10000, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='information_type_id', full_name='dstore.engine.mi_GetInformationTypes_Ad.Response.Row.information_type_id', index=1,
number=10001, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='information_type', full_name='dstore.engine.mi_GetInformationTypes_Ad.Response.Row.information_type', index=2,
number=10002, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=345,
serialized_end=481,
)
_RESPONSE = _descriptor.Descriptor(
name='Response',
full_name='dstore.engine.mi_GetInformationTypes_Ad.Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='meta_information', full_name='dstore.engine.mi_GetInformationTypes_Ad.Response.meta_information', index=0,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='message', full_name='dstore.engine.mi_GetInformationTypes_Ad.Response.message', index=1,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='row', full_name='dstore.engine.mi_GetInformationTypes_Ad.Response.row', index=2,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_RESPONSE_ROW, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=165,
serialized_end=481,
)
_RESPONSE_ROW.fields_by_name['information_type_id'].message_type = dstore_dot_values__pb2._INTEGERVALUE
_RESPONSE_ROW.fields_by_name['information_type'].message_type = dstore_dot_values__pb2._STRINGVALUE
_RESPONSE_ROW.containing_type = _RESPONSE
_RESPONSE.fields_by_name['meta_information'].message_type = dstore_dot_engine_dot_engine__pb2._METAINFORMATION
_RESPONSE.fields_by_name['message'].message_type = dstore_dot_engine_dot_engine__pb2._MESSAGE
_RESPONSE.fields_by_name['row'].message_type = _RESPONSE_ROW
DESCRIPTOR.message_types_by_name['Parameters'] = _PARAMETERS
DESCRIPTOR.message_types_by_name['Response'] = _RESPONSE
Parameters = _reflection.GeneratedProtocolMessageType('Parameters', (_message.Message,), dict(
DESCRIPTOR = _PARAMETERS,
__module__ = 'dstore.engine.procedures.mi_GetInformationTypes_Ad_pb2'
# @@protoc_insertion_point(class_scope:dstore.engine.mi_GetInformationTypes_Ad.Parameters)
))
_sym_db.RegisterMessage(Parameters)
Response = _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), dict(
Row = _reflection.GeneratedProtocolMessageType('Row', (_message.Message,), dict(
DESCRIPTOR = _RESPONSE_ROW,
__module__ = 'dstore.engine.procedures.mi_GetInformationTypes_Ad_pb2'
# @@protoc_insertion_point(class_scope:dstore.engine.mi_GetInformationTypes_Ad.Response.Row)
))
,
DESCRIPTOR = _RESPONSE,
__module__ = 'dstore.engine.procedures.mi_GetInformationTypes_Ad_pb2'
# @@protoc_insertion_point(class_scope:dstore.engine.mi_GetInformationTypes_Ad.Response)
))
_sym_db.RegisterMessage(Response)
_sym_db.RegisterMessage(Response.Row)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\033io.dstore.engine.proceduresZ;gosdk.dstore.de/engine/procedures/mi_GetInformationTypes_Ad'))
import grpc
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
# @@protoc_insertion_point(module_scope)
| [
"b.dolkemeier@dbap.de"
] | b.dolkemeier@dbap.de |
6b4038c50a12b01cae8895691a41ee3c55405e9e | 6e8f2e28479566dbaa338300b2d61f784ff83f97 | /.history/code/datasetup_20210414101740.py | 1b100c5fafb593a7c48ab039e9ecf27287a49d16 | [] | no_license | eeng5/CV-final-project | 55a7d736f75602858233ebc380c4e1d67ab2b866 | 580e28819560b86f6974959efb1d31ef138198fc | refs/heads/main | 2023-04-09T21:28:21.531293 | 2021-04-21T19:57:22 | 2021-04-21T19:57:22 | 352,703,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,783 | py | from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pandas as pd
import cv2
import os
import glob
from pathlib import Path
def cleanTestDirs():
    """Delete every generated .jpg under the test 'angry' folder.

    Failures (e.g. permission errors) are logged and skipped so one bad
    file does not abort the cleanup.

    NOTE(review): despite the plural name, only the angry/ test folder is
    cleaned here -- confirm whether the other emotion folders need the same.
    """
    # Removed: unused `emotions` accumulator and dead commented-out code.
    for f in Path('/Users/Natalie/Desktop/cs1430/CV-final-project/data/test/angry').glob('*.jpg'):
        try:
            os.remove(f)
        except OSError as e:
            print("Error: %s : %s" % (f, e.strerror))
def cleanTrainDirs():
    """Remove all generated .jpg files from the training 'angry' folder,
    logging any file that cannot be deleted."""
    train_dir = Path('/Users/Natalie/Desktop/cs1430/CV-final-project/data/train/angry')
    for img_path in train_dir.glob('*.jpg'):
        try:
            img_path.unlink()
        except OSError as err:
            print("Error: %s : %s" % (img_path, err.strerror))
def cleanAll():
    """Wipe generated images from both the test and train folders."""
    for cleaner in (cleanTestDirs, cleanTrainDirs):
        cleaner()
def createPixelArray(arr):
    """Parse a whitespace-separated pixel string into a 48x48 uint8 image."""
    pixels = [int(token) for token in arr.split()]
    return np.array(pixels, dtype=np.uint8).reshape((48, 48))
def equalize_hist(img):
    """Histogram-equalize a grayscale image via OpenCV."""
    return cv2.equalizeHist(img)
def showImages(imgs):
    """Display the given grayscale images side by side in a single figure."""
    _, axes = plt.subplots(1, len(imgs), figsize=(20, 20))
    for axis, img in zip(axes.flatten(), imgs):
        axis.imshow(img, cmap=plt.get_cmap('gray'))
    plt.show()
def augmentIMG(img, task):
    """Return the image plus augmented variants, in a fixed order.

    Always yields [original, histogram-equalized, horizontal flip]; for
    task 3 a bilateral-filtered (edge-preserving smoothed) copy of the
    equalized image is inserted before the flip.
    """
    equalized = equalize_hist(img)
    variants = [img, equalized]
    if task == 3:
        variants.append(cv2.bilateralFilter(equalized, d=9, sigmaColor=75, sigmaSpace=75))
    variants.append(cv2.flip(img, 1))  # mirror left-to-right
    return variants
def saveIMG(arr, num, folderLoc):
    """Write a pixel array to <folderLoc>image_<num>.jpg."""
    target = "%simage_%s.jpg" % (folderLoc, num)
    Image.fromarray(arr).save(target)
def _export_split(emotion_dict, task, usage, subdir):
    """Shared worker for createTrain/createTest (their bodies were duplicates).

    Reads the FER csv and, for every row whose ' Usage' column equals
    *usage*, decodes the pixel string, augments it according to *task*,
    and saves each variant under data/<subdir>/<emotion folder>/.
    """
    df = pd.read_csv('/Users/Natalie/Desktop/cs1430/CV-final-project/data/icml_face_data.csv') # CHANGE ME
    base_filename = "/Users/Natalie/Desktop/cs1430/CV-final-project/data/" + subdir + "/" # CHANGE ME
    for index, row in df.iterrows():
        if row[' Usage'] == usage:
            px = row[' pixels']
            emot = int(row['emotion'])
            filename = base_filename + emotion_dict[emot]
            img = createPixelArray(px)
            img_arr = augmentIMG(img, task)
            # Each augmented variant gets a unique "<row>_<variant>" suffix.
            for idx, variant in enumerate(img_arr):
                num = str(index) + "_" + str(idx)
                saveIMG(variant, num, filename)

def createTrain(emotion_dict, task):
    """Generate augmented training images (rows marked 'Training')."""
    _export_split(emotion_dict, task, "Training", "train")

def createTest(emotion_dict, task):
    """Generate augmented test images (rows marked 'PublicTest')."""
    _export_split(emotion_dict, task, "PublicTest", "test")
def createEmotionDict():
    """Map each FER emotion label (0-6) to its output sub-folder name."""
    return {
        0: "angry/",
        1: "disgust/",
        2: "fear/",
        3: "happy/",
        4: "sad/",
        5: "surprise/",
        6: "neutral/",
    }
def createSimpleData():
    """Rebuild the train/test folders using the basic (task 1) augmentations."""
    cleanAll()
    print("Cleaning done")
    emotion_map = createEmotionDict()
    createTrain(emotion_map, 1)
    print("Training done")
    createTest(emotion_map, 1)
    print("Testing done")
def createComplexData():
    """Rebuild the train/test folders using the full (task 3) augmentation set."""
    cleanAll()
    emotion_map = createEmotionDict()
    createTrain(emotion_map, 3)
    createTest(emotion_map, 3)
def main():
    # NOTE(review): builds the emotion map and discards it -- looks like a
    # stub; confirm whether createSimpleData()/createComplexData() was meant
    # to be called here.
    emot_dict = createEmotionDict()
if __name__ == '__main__':
main() | [
"natalie_rshaidat@brown.edu"
] | natalie_rshaidat@brown.edu |
7bcf641d35d5803e3a09acdb8776d9a11ac344a1 | 2cc1097ec2941158be3466ec9a3ad54a02e92c83 | /gutenbergToTei.py | 1870403c73e0f362c5dce4af4d4ddfba0a4fcf6e | [] | no_license | mpetyx/gutenbergToTei | 418583a6905be6aaf3776cc02daaa5d6668675d7 | c8fd98fb832672a521e51403756f9d077fcceca6 | refs/heads/master | 2020-05-20T03:08:13.510126 | 2013-12-16T12:19:59 | 2013-12-16T12:19:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,427 | py | # gutenbergToTei.py
#
# Reformats and renames etexts downloaded from Project Gutenberg.
#
# Software adapted from Michiel Overtoom, motoom@xs4all.nl, july 2009.
#
# Modified by Matthew Jockers August 17, 2010 to encode result into TEI based XML
#
import os
import re
import shutil
remove = ["Produced by","End of the Project Gutenberg","End of Project Gutenberg"]
def beautify(fn, outputDir, filename):
    ''' Reads a raw Project Gutenberg etext, reformat paragraphs,
    and removes fluff. Determines the title of the book and uses it
    as a filename to write the resulting output text. '''
    # Strip every line up front; paragraph breaks are detected as blank lines.
    lines = [line.strip() for line in open(fn)]
    collect = False            # True once the "*** START" marker was seen
    lookforsubtitle = False    # True while lines after "Title:" may extend it
    outlines = []              # accumulated output fragments, joined at the end
    # NOTE(review): startseen/endseen are set below but never read.
    startseen = endseen = False
    title=""
    # TEI header boilerplate, split where title/author get inserted.
    one="<?xml version=\"1.0\" encoding=\"utf-8\"?><TEI xmlns=\"http://www.tei-c.org/ns/1.0\" version=\"5.0\"><teiHeader><fileDesc><titleStmt>"
    two = "</titleStmt><publicationStmt><publisher></publisher><pubPlace></pubPlace><availability status=\"free\"><p>Project Gutenberg</p></availability></publicationStmt><seriesStmt><title>Project Gutenberg Full-Text Database</title></seriesStmt><sourceDesc default=\"false\"><biblFull default=\"false\"><titleStmt>"
    three = "</titleStmt><extent></extent><publicationStmt><publisher></publisher><pubPlace></pubPlace><date></date></publicationStmt></biblFull></sourceDesc></fileDesc><encodingDesc><editorialDecl default=\"false\"><p>Preliminaries omitted.</p></editorialDecl></encodingDesc></teiHeader><text><body><div>"
    for line in lines:
        if line.startswith("Author: "):
            author = line[8:]
            authorTemp = line[8:]
            continue
        if line.startswith("Title: "):
            title = line[7:]
            titleTemp = line[7:]
            lookforsubtitle = True
            continue
        if lookforsubtitle:
            # Non-blank lines directly after "Title:" are subtitle parts.
            if not line.strip():
                lookforsubtitle = False
            else:
                subtitle = line.strip()
                subtitle = subtitle.strip(".")
                title += ", " + subtitle
        if ("*** START" in line) or ("***START" in line):
            collect = startseen = True
            # NOTE(review): the first paragraph starts as "" (not "<p>"),
            # so the very first flushed paragraph lacks its opening tag.
            paragraph = ""
            continue
        if ("*** END" in line) or ("***END" in line):
            endseen = True
            break
        if not collect:
            continue
        # Emit the TEI header exactly once, using the captured title/author.
        # NOTE(review): if the START marker precedes the Title:/Author:
        # headers, titleTemp/authorTemp are unbound here -> NameError.
        if (titleTemp) and (authorTemp):
            outlines.append(one)
            outlines.append("<title>")
            outlines.append(titleTemp)
            outlines.append("</title>")
            outlines.append("<author>")
            outlines.append(authorTemp)
            outlines.append("</author>")
            outlines.append(two)
            outlines.append("<title>")
            outlines.append(titleTemp)
            outlines.append("</title>")
            outlines.append("<author>")
            outlines.append(authorTemp)
            outlines.append("</author>")
            outlines.append(three)
            authorTemp = False
            titleTemp = False
            continue
        if not line:
            # Blank line: flush the accumulated paragraph (unless it is
            # Gutenberg boilerplate from the `remove` list).
            paragraph = paragraph.strip()
            for term in remove:
                if paragraph.startswith(term):
                    paragraph = ""
            if paragraph:
                # Escape ampersands for XML well-formedness.
                paragraph = paragraph.replace("&", "&amp;")
                outlines.append(paragraph)
                outlines.append("</p>")
            paragraph = "<p>"
        else:
            paragraph += " " + line
    # Compose a filename. Replace some illegal file name characters with alternatives.
    #ofn = author + title[:150] + ".xml"
    ofn = filename
    ofn = ofn.replace("&", "")
    ofn = ofn.replace("/", "")
    ofn = ofn.replace("\"", "")
    ofn = ofn.replace(":", "")
    ofn = ofn.replace(",,", "")
    ofn = ofn.replace(" ", "")
    # NOTE(review): replaces "txt" anywhere in the name, not just the
    # extension -- a title containing "txt" would be mangled.
    ofn = ofn.replace("txt", "xml")
    outlines.append("</div></body></text></TEI>")
    text = "\n".join(outlines)
    # NOTE(review): re.sub's 4th positional argument is `count`, not
    # `flags` -- passing re.M here limits replacements to 8 occurrences
    # and does NOT enable MULTILINE; should be flags=re.M.
    text = re.sub("End of the Project Gutenberg .*", "", text, re.M)
    text = re.sub("Produced by .*", "", text, re.M)
    # NOTE(review): non-raw pattern strings ("\s", "\/") rely on Python
    # passing unknown escapes through; prefer raw strings r"...".
    text = re.sub("<p>\s+<\/p>", "", text)
    text = re.sub("\s+", " ", text)
    f = open(outputDir+ofn, "wt")
    f.write(text)
    f.close()
# Batch driver: convert every *.txt etext found in sourceDir into a TEI XML
# file written to outputDir (same base name, .txt replaced by .xml inside
# beautify()).
sourcepattern = re.compile(".*\.txt$")
sourceDir = "/Path/to/your/ProjectGutenberg/files/"  # adjust to your raw etexts
outputDir = "/Path/to/your/ProjectGutenberg/TEI/Output/files/"  # adjust to your output dir
for fn in os.listdir(sourceDir):
    if sourcepattern.match(fn):
        beautify(sourceDir+fn, outputDir, fn)
| [
"mpetyx@gmail.com"
] | mpetyx@gmail.com |
642c4c69096cef5756541a7d1f80cb48a9371758 | a0717328fdb8537251e8cccce25103c4fc97b172 | /web/backend/source/db/__init__.py | 3c4adec769160c35d229bf20e3f885df8a9e454f | [] | no_license | rodobre/smarthack | bee6c4d9b81cc1af3036dde9b81178cfa7e05bd1 | 0a76c089ec23610fd78e396bf7aa5d8c793882ef | refs/heads/master | 2020-09-01T14:22:22.478647 | 2019-11-03T08:10:09 | 2019-11-03T08:10:09 | 218,978,545 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 527 | py | from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from redis_collections import Dict
import redislite
# database stored in a file (simplest way)
# TODO: change engine type if needed
db_uri = "sqlite:///database.sql"
engine = create_engine(db_uri)
# Declarative base class for ORM models; Session is a factory bound to the
# SQLite engine above.
Base = declarative_base()
Session = sessionmaker(bind = engine)
# Embedded redis (redislite) backing a redis-persisted dict used as a
# simple key/value cache under the 'storage' key.
cache_uri = 'storage.rdb'
redis_connection = redislite.StrictRedis(cache_uri)
Cache = Dict(redis=redis_connection, key='storage')
| [
"mihailferaru2000@gmail.com"
] | mihailferaru2000@gmail.com |
17283d50974695ebd93d2e2f98cdaf3efb4724f5 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03438/s143987298.py | 8d742e71452282deac9bebdbc846d1dab746618c | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 448 | py | ma = lambda :map(int,input().split())
# Competitive-programming input helpers: lma/tma read a line of ints as
# list/tuple, ni reads one int, yn prints Yes/No for a boolean.
lma = lambda :list(map(int,input().split()))
tma = lambda :tuple(map(int,input().split()))
ni = lambda:int(input())
yn = lambda fl:print("Yes") if fl else print("No")
import collections
import math
import itertools
import heapq as hq
ceil = math.ceil
n = ni()
A = lma()
B = lma()
# Per-position surplus/deficit of B over A.
D = [B[i]-A[i] for i in range(n)]
tmp = 0
for d in D:
    if d>0:
        # Positive gaps contribute floor(d/2); negative gaps count in full.
        d//=2
    tmp+=d
# Feasible iff the weighted balance is non-negative.
f=False
if tmp>=0:
    f=True
yn(f)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
f0e447ddffc1c9a7edeeb540fb89de63cea9b26c | ef243d91a1826b490e935fa3f3e6c29c3cc547d0 | /PyQt5/QtWidgets/QStyleOptionTabWidgetFrame.py | 6fc9ddbe4f0b04937109786e7e89b57e0c786274 | [] | no_license | VentiFang/Python_local_module | 6b3d0b22399e817057dfd15d647a14bb1e41980e | c44f55379eca2818b29732c2815480ee755ae3fb | refs/heads/master | 2020-11-29T11:24:54.932967 | 2019-12-25T12:57:14 | 2019-12-25T12:57:14 | 230,101,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 615 | py | # encoding: utf-8
# module PyQt5.QtWidgets
# from F:\Python\Python36\lib\site-packages\PyQt5\QtWidgets.pyd
# by generator 1.147
# no doc
# imports
import PyQt5.QtCore as __PyQt5_QtCore
import PyQt5.QtGui as __PyQt5_QtGui
import sip as __sip
from .QStyleOption import QStyleOption
class QStyleOptionTabWidgetFrame(QStyleOption):
"""
QStyleOptionTabWidgetFrame()
QStyleOptionTabWidgetFrame(QStyleOptionTabWidgetFrame)
"""
def __init__(self, QStyleOptionTabWidgetFrame=None): # real signature unknown; restored from __doc__ with multiple overloads
pass
Type = 11
Version = 2
| [
"5149528+ventifang@user.noreply.gitee.com"
] | 5149528+ventifang@user.noreply.gitee.com |
ce485319554c320f209c63112d8997942295f3fe | 6119d11cc09c3604b4dd50f590126573d49f32e2 | /illiad_article_handler_app/tests.py | 1f61a9cc6b32d9cf0b524765ff676e5dc9556160 | [] | no_license | Brown-University-Library/illiad_article_handler_project | 393ed1bdda373e10d4ed3a6b886ef3d19ebea726 | 730d87eab14d826a86c572896a99dd124f785e9b | refs/heads/main | 2023-08-03T03:22:07.049285 | 2021-09-22T14:18:37 | 2021-09-22T14:18:37 | 402,741,433 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 986 | py | import logging
# from django.test import TestCase (test commit)
from django.test import SimpleTestCase as TestCase ## TestCase requires db, this doesn't
log = logging.getLogger(__name__)
# class Url_Test( TestCase ):
# """ Checks redirect output. """
# def test_redirect(self):
# """ Checks that redirect is returned. """
# response = self.client.get( '/handler/', {'ItemTitle': 'some title' } ) # project root part of url is assumed
# self.assertEqual( 302, response.status_code ) # redirect
# self.assertEqual( 'https://jcbl.aeon.atlas-sys.com/aeon.dll?ItemTitle=some+title', response.headers['Location'] )
# def test_colon(self):
# """ Checks that colon is encoded. """
# response = self.client.get( '/handler/', {'ItemTitle': 'some: title' } ) # project root part of url is assumed
# self.assertEqual( 'https://jcbl.aeon.atlas-sys.com/aeon.dll?ItemTitle=some%3A+title', response.headers['Location'] )
| [
"birkin.diana@gmail.com"
] | birkin.diana@gmail.com |
cbea9fe9444f660a8102289ab002207f5f9e2d8c | be429a1e5e4903616a4532c1bf238df20fea75c0 | /6.8/671.二叉树中第二小的节点.py | 092fde588c9f0ce330899c0d7faf5c961c923a0c | [] | no_license | pythonnewbird/LeetCodeSolution | ccc8cc17df4cea3109d84b0c347ae91c1bc33a28 | 2447f760f08fb3879c5f03d8650e30ff74115d3d | refs/heads/master | 2020-03-19T05:06:06.681429 | 2018-07-01T12:39:09 | 2018-07-01T12:39:09 | 135,899,944 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 654 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def findSecondMinimumValue(self, root):
        """Return the smallest node value strictly greater than the root's
        (minimum) value, or -1 if every node holds the same value.

        :type root: TreeNode
        :rtype: int
        """
        # Fix: the original used 0x80000000 (2**31) as the "not found"
        # sentinel, so any second minimum >= 2**31 was reported as -1.
        # float('inf') is safely larger than any node value.
        INF = float('inf')
        self.ans = INF
        minVal = root.val

        def traverse(node):
            if not node:
                return
            # Track the smallest value strictly above the global minimum.
            if minVal < node.val < self.ans:
                self.ans = node.val
            traverse(node.left)
            traverse(node.right)

        traverse(root)
        return -1 if self.ans == INF else self.ans
"21637007@zju.edu.cn"
] | 21637007@zju.edu.cn |
96f26415c59c3f5d4cc5ad55a367af99e8cd7b23 | 75569ed16c90c7e4081e4ef3e5caafe8a622830f | /tests/basics/class-super.py | 6a87b2fd00c817af2924ff7e056f1ce7aab3b18f | [
"MIT"
] | permissive | aitjcize/micropython | 22b153e88d2f2c8c44c92ac9b9eeee0396ed385a | 203bc98804dd8ad60476b531f29b6658dcffadcf | refs/heads/master | 2020-05-20T17:56:26.304832 | 2014-04-15T11:52:59 | 2014-04-15T11:52:59 | 18,765,665 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 181 | py | class Base:
def meth(self):
print("in Base meth")
class Sub(Base):
def meth(self):
print("in Sub meth")
return super().meth()
a = Sub()
a.meth()
| [
"pfalcon@users.sourceforge.net"
] | pfalcon@users.sourceforge.net |
34934364d6e50c0f4baf9fbc1cb465d2decbba84 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa2/sample/ast_coverage-249.py | 2b992ce35be39fd106d6df923513d9ec2dd95fb2 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 676 | py | count:int = 0
def foo(s: str) -> int:
return len(s)
class bar(object):
p: bool = True
def baz(self:"bar", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[$INT]) == 1:
self.p = self is None
return "Nope"
print(bar().baz([1,2]))
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
c1847152d25bfcdf25efa91b2c9419a6b9daf9e7 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03128/s188279652.py | ab5fe02baf01861864082c97175ad115cba641b4 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,290 | py | # coding: utf-8
import sys
sr = lambda: sys.stdin.readline().rstrip()
ir = lambda: int(sr())
lr = lambda: list(map(int, sr().split()))

# First maximize the number of digits, then arrange digits from largest
# to smallest.
N, M = lr()
A = lr()
# matches[d] = matchsticks needed to display digit d.
matches = [0, 2, 5, 5, 4, 5, 6, 3, 7, 6]
A = [(matches[a], a) for a in A]
# Sort by digit descending, then (stably) by matchstick cost ascending, so
# for equal cost the larger digit comes first.
A.sort(key = lambda x: x[1], reverse=True)
A.sort(key = lambda x: x[0])
top_match = A[0][0]
# dp[x] = best multiset of digits buildable with exactly x matchsticks
# (None if unreachable); "best" = most digits, ties broken by larger number.
dp = [None] * (N+1)
dp[0] = []
used = set()
for match, num in A:
    # Only the best (largest) digit per matchstick cost matters.
    if match in used:
        continue
    used.add(match)
    for x in range(N+1):
        if x - match < 0:
            continue
        # NOTE(review): comparisons with None should use `is None` /
        # `is not None` rather than ==/!= (PEP 8).
        if dp[x] == None and dp[x-match] != None:
            dp[x] = dp[x-match] + [num]
        elif dp[x] != None and dp[x-match] != None:
            if len(dp[x-match]) >= len(dp[x]):
                dp[x] = dp[x-match] + [num]
            elif len(dp[x-match]) >= 1 and len(dp[x-match]) == len(dp[x]) - 1:
                # Same digit count: keep whichever forms the larger number
                # when digits are sorted in descending order.
                y = list(map(str, dp[x][::-1])); y.sort(reverse=True)
                z = [str(num)] + list(map(str, dp[x-match][::-1])); z.sort(reverse=True)
                y = int(''.join(y))
                z = int(''.join(z))
                if z > y:
                    dp[x] = dp[x-match] + [num]
X = dp[N]
# Largest number: digits in descending order.
X.sort(reverse=True)
answer = ''.join(list(map(str, X)))
print(answer)
# 37
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
292d385070572bf7f16310dfa66e9e0cbca0ef5c | 59fb17c240b261040026d713a6ac9c97d6a9f265 | /gym/gym/utils/colorize.py | ac7ea10f6f91bb8e623c462fa4a32657fde70f0a | [
"MIT"
] | permissive | dmeger/TeachingImitation | 3fb97499e76929959913266f127154f6ae5a8e99 | 5f4dba7e49987924c3d55cd27579cad4c71ef7a4 | refs/heads/master | 2023-03-28T13:25:01.307382 | 2021-04-06T15:07:08 | 2021-04-06T15:07:08 | 355,223,500 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 768 | py | """A set of common utilities used within the environments. These are
not intended as API functions, and will not remain stable over time.
"""
color2num = dict(
    gray=30,
    red=31,
    green=32,
    yellow=33,
    blue=34,
    magenta=35,
    cyan=36,
    white=37,
    crimson=38
)


def colorize(string, color, bold=False, highlight = False):
    """Wrap *string* in ANSI escape codes for the given terminal color.

    Valid colors: gray, red, green, yellow, blue, magenta, cyan, white,
    crimson.  ``bold`` adds the bold attribute; ``highlight`` selects the
    background variant of the color (code + 10).
    """
    code = color2num[color] + (10 if highlight else 0)
    attributes = [str(code)]
    if bold:
        attributes.append('1')
    return '\x1b[%sm%s\x1b[0m' % (';'.join(attributes), string)
| [
"david.meger@gmail.com"
] | david.meger@gmail.com |
a73ca2447eb10abc6d70d53b6c07766887780a87 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_navahos.py | 2d6837a91058d376c49ef6307b7b9b61348d2dda | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py |
#calss header
class _NAVAHOS():
def __init__(self,):
self.name = "NAVAHOS"
self.definitions = navaho
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['navaho']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
08b66af75db565b43692361530227c0edc3a2a89 | 04b4198a006d4527432ca8de8bf92cc5f9ded3de | /logistic.py | 3a73d7e510fa01243e5e4603d0add8223f4868de | [] | no_license | chenzhengsi1988/try | 4986623077a1bed6f40c3ed0327f1e96eea4a6ef | 5c448a6da317cd0853ec24db108a3e7237a2153e | refs/heads/master | 2021-09-06T17:49:06.923387 | 2018-02-09T08:49:49 | 2018-02-09T08:49:49 | 111,104,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,716 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 15 11:20:58 2017
@author: zsc
"""
from __future__ import print_function, division
import tensorflow as tf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn
from sklearn.cross_validation import train_test_split
import random
# Read the data
data = pd.read_csv("~/ml/data/datatraining.txt")
print(type(data))
print(data)

# Split into train/test sets
X_train, X_test, y_train, y_test = train_test_split(
    data[["Temperature", "Humidity", "Light", "CO2", "HumidityRatio"]].values, data["Occupancy"].values.reshape(-1, 1),
    random_state=42)

# One-hot encode the binary Occupancy label into two columns
# print(y_train.shape)
y_train = tf.concat([1 - y_train, y_train], 1)
y_test = tf.concat([1 - y_test, y_test], 1)
# print(y_train.shape)

# Model hyperparameters
learning_rate = 0.001
training_epoch = 5
batch_size = 100
display_step = 1
n_samples = X_train.shape[0]
# print(n_samples)
n_features = 5
n_class = 2

x = tf.placeholder(tf.float32, [None, n_features])
y = tf.placeholder(tf.float32, [None, n_class])

# Model parameters
W = tf.Variable(tf.zeros([n_features, n_class]))
b = tf.Variable(tf.zeros([n_class]))
# W = tf.Variable(tf.truncated_normal([n_features, n_class-1]))
# b = tf.Variable(tf.truncated_normal([n_class]))

# Compute predictions (softmax over the two classes)
pred = tf.nn.softmax(tf.matmul(x, W) + b)

# Compute the loss using cross-entropy (relative entropy)
cost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(pred), reduction_indices=1))

# Define the optimizer
# optimizer=tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)

# Accuracy: fraction of samples where predicted class matches the label
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# Initialize all variables
init = tf.initialize_all_variables()
aa = list()
bb = list()
# Train the model
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(training_epoch):
        avg_cost = 0.
        total_batch = int(n_samples / batch_size)
        for i in range(total_batch):
            batch_xs = X_train[i * batch_size: (i + 1) * batch_size]
            batch_ys = sess.run(y_train[i * batch_size: (i + 1) * batch_size])
            _, c = sess.run([optimizer, cost], feed_dict={x: batch_xs, y: batch_ys})
            avg_cost += c / total_batch
        if (epoch + 1) % display_step == 0:
            print("Epoch:", "%04d" % (epoch + 1), "cost=", avg_cost)
            aa.append(epoch + 1)
            bb.append(avg_cost)
    print("Optimization Finished!")
    # NOTE(review): accuracy is evaluated on the TRAINING set here; use
    # X_test/y_test to report a genuine test accuracy.
    print("Testing Accuracy:", accuracy.eval({x: X_train, y: y_train.eval()}))
# Plot the per-epoch training cost curve
plt.xlabel("Epoch")
plt.ylabel("Cost")
plt.plot(aa, bb)
plt.show()
| [
"you@example.com"
] | you@example.com |
13dffa6919ac9b8c70595168f8ee24846398cf7e | 48d30fa3d9806fee872e76393e77900c6aab8717 | /djangochat/settings.py | f38b35058f2c0fbb98e59b355a7123e25440709e | [] | no_license | olivx/django-vue-chat | dbaca42d8c4531d4d803e376839e8fb6fbad722b | f4100513a7fa1d170d7a4973b0edb456ade00c2d | refs/heads/master | 2020-04-24T01:07:10.055368 | 2019-02-26T14:52:28 | 2019-02-26T14:52:28 | 171,587,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,571 | py | """
Django settings for djangochat project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
from dotenv import load_dotenv
load_dotenv()
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'djoser',
'corsheaders',
'rest_framework',
'rest_framework.authtoken',
'core',
]
# Request/response middleware chain. Order matters: per the django-cors-headers
# documentation, CorsMiddleware must be placed as high as possible -- and in
# particular before CommonMiddleware -- so CORS headers are added to every
# response, including redirects and error responses. It previously sat last,
# where it never ran before the response was finalized.
MIDDLEWARE = [
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djangochat.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'djangochat.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.getenv('DB_NAME'),
'USER': os.getenv('DB_USER'),
'PASSWORD': os.getenv('DB_PASSWORD'),
'HOST': os.getenv('DB_HOST'),
'PORT': os.getenv('DB_PORT'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
),
}
CORS_ORIGIN_ALLOW_ALL = True
| [
"oliveiravicente.net@gmail.com"
] | oliveiravicente.net@gmail.com |
52b92cc0359020e208aa18e832e1ad7cec007240 | 51d602577affebc8d91ffe234f926469d389dc75 | /lis/specimen/lab_aliquot/models/base_aliquot.py | 881efd0a10e60181bdeffbc6009a840b110f194b | [] | no_license | botswana-harvard/lis | 5ac491373f74eaf3855f173580b000539d7f4740 | 48dc601ae05e420e8f3ebb5ea398f44f02b2e5e7 | refs/heads/master | 2020-12-29T01:31:07.821681 | 2018-06-24T06:06:57 | 2018-06-24T06:06:57 | 35,820,860 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,204 | py | import datetime
from django.utils import timezone
from django.db import models
from django.core.urlresolvers import reverse
from ..choices import ALIQUOT_STATUS, SPECIMEN_MEASURE_UNITS, SPECIMEN_MEDIUM
class BaseAliquot (models.Model):
    """Abstract base model for a laboratory aliquot.

    Concrete subclasses are expected to provide a ``receive`` relation and a
    ``get_visit_model`` method; both are referenced by the methods below.
    """
    # Root aliquot of this aliquot's lineage (self-referential FK).
    primary_aliquot = models.ForeignKey('self',
                                        null=True,
                                        related_name='primary',
                                        editable=False)
    # Aliquot this one was derived from; null for the primary tube.
    source_aliquot = models.ForeignKey('self',
                                       null=True,
                                       related_name='source',
                                       editable=False,
                                       help_text='Aliquot from which this aliquot was created, Leave blank if this is the primary tube')
    # System-assigned unique identifier; not user editable.
    aliquot_identifier = models.CharField(
        verbose_name='Aliquot Identifier',
        max_length=25,
        unique=True,
        help_text="Aliquot identifier",
        editable=False)
    aliquot_datetime = models.DateTimeField(
        verbose_name="Date and time aliquot created",
        default=timezone.now)
    count = models.IntegerField(
        editable=False,
        null=True)
    # Physical container/medium; choices defined in ..choices.
    medium = models.CharField(
        verbose_name='Medium',
        max_length=25,
        choices=SPECIMEN_MEDIUM,
        default='TUBE')
    # Volume at creation vs. remaining volume, expressed in measure_units.
    original_measure = models.DecimalField(
        max_digits=10,
        decimal_places=2,
        default='5.00')
    current_measure = models.DecimalField(
        max_digits=10,
        decimal_places=2,
        default='5.00')
    measure_units = models.CharField(
        max_length=25,
        choices=SPECIMEN_MEASURE_UNITS,
        default='mL')
    status = models.CharField(
        max_length=25,
        choices=ALIQUOT_STATUS,
        default='available')
    comment = models.CharField(
        max_length=50,
        null=True,
        blank=True)
    # Denormalised from the related subject; kept to simplify search/filtering.
    subject_identifier = models.CharField(
        max_length=50,
        null=True,
        editable=False,
        help_text="non-user helper field to simplify search and filtering")
    is_packed = models.BooleanField(
        verbose_name='packed',
        default=False)
    # Denormalised copy of receive.receive_identifier; refreshed in save().
    receive_identifier = models.CharField(
        max_length=25,
        null=True,
        editable=False,
        help_text="non-user helper field to simplify search and filter")
    def __str__(self):
        return '%s' % (self.aliquot_identifier)
    def save(self, *args, **kwargs):
        """Refresh the denormalised receive_identifier before saving."""
        self.receive_identifier = self.receive.receive_identifier
        super(BaseAliquot, self).save(*args, **kwargs)
    def natural_key(self):
        """Natural key for serialization: the unique aliquot identifier."""
        return (self.aliquot_identifier,)
    def get_subject_identifier(self):
        return self.subject_identifier
    def get_visit(self):
        # NOTE(review): relies on get_visit_model() being supplied by the
        # concrete subclass -- confirm before reuse.
        return self.get_visit_model().objects.get(subject_identifier=self.get_subject_identifier())
    def drawn(self):
        """Date/time the specimen was drawn, taken from the receive record."""
        return self.receive.drawn_datetime
    def barcode_value(self):
        """Value encoded on the printed barcode label."""
        return self.aliquot_identifier
    def to_receive(self):
        """Admin helper: HTML link to the related receive changelist entry."""
        url = reverse('admin:{app_label}_receive_changelist'.format(app_label=self._meta.app_label,))
        return '<a href="{url}?q={receive_identifier}">{receive_identifier}</a>'.format(
            url=url, app_label=self._meta.app_label, receive_identifier=self.receive.receive_identifier)
    to_receive.allow_tags = True  # NOTE(review): allow_tags was removed in newer Django -- confirm target version
    class Meta:
        abstract = True
| [
"ckgathi@gmail.com"
] | ckgathi@gmail.com |
173faf68071a21ad7c81089f2ddfb4d75bfbdac2 | 6930a434c0506d44bf8a8e81cb86e95c219c3a77 | /python/day09/code/dict2.py | ce9b66edc2c41b3b8bb0cbac196501f4f251a01f | [] | no_license | Conquerk/test | ed15d5603538340559556c9e0f20cc61ad3e4486 | 7ff42c99b8a2132c6dd1c73315ff95cfef63a8f6 | refs/heads/master | 2020-04-19T01:47:28.322929 | 2019-01-28T01:52:00 | 2019-01-28T01:52:00 | 167,882,236 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | #字典行参
def fun(**kwargs):
    """Demo of **kwargs packing: print the number of keyword args, then the dict."""
    print("关键字传参个数是",len(kwargs))
    print("kwargs",kwargs)
fun(a=1,b="BBBB",c=[2,3,4])#call with keyword arguments
fun(a=1,b=2,c=3,d=4)  # a second call with a different argument count
"tarena@tedu.cn"
] | tarena@tedu.cn |
d86dc0cb296c60aa5c62e35bd80bec4fb557775d | 9787a86bd6721062a8cf7cc04c21c092a4aeb4a0 | /dapper/mods/Lorenz63/anderson2010rhf.py | 93c7177874a95116146c42bb3ff40c051691fcd3 | [
"MIT"
] | permissive | 1895-art/DAPPER | 09d8b6a88c8997ad7f190f96930be559b43ee143 | bfc4075782f6b247a70cd5d04d4308d135ea5379 | refs/heads/master | 2023-01-19T23:14:13.370754 | 2020-11-23T16:01:21 | 2020-11-23T16:01:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 993 | py | """As in Anderson 2010 rank histogram filter"""
import numpy as np
import dapper as dpr
from dapper.mods.Lorenz63 import Tplot, dstep_dx, step, x0
# Time sequencing of the twin experiment (see dpr.Chronology for parameters).
t = dpr.Chronology(0.01, dkObs=12, KObs=1000, Tplot=Tplot, BurnIn=4*Tplot)
Nx = len(x0)  # state dimension of the Lorenz-63 system
# Dynamics dict: the model step function, its linearization (dstep_dx),
# and zero model noise.
Dyn = {
    'M': Nx,
    'model': step,
    'linear': dstep_dx,
    'noise': 0
}
# Gaussian initial condition centred on x0.
X0 = dpr.GaussRV(C=2, mu=x0)
# Direct (identity) observation of every state component.
Obs = dpr.partial_Id_Obs(Nx, np.arange(Nx))
Obs['noise'] = 8.0  # observation noise setting, as in Anderson (2010)
HMM = dpr.HiddenMarkovModel(Dyn, Obs, t, X0)
####################
# Suggested tuning
####################
# Compare with Anderson's figure 10.
# Benchmarks are fairly reliable (KObs=2000):
# from dapper.mods.Lorenz63.anderson2010rhf import HMM          # rmse.a
# xps += SL_EAKF(N=20,infl=1.01,rot=True,loc_rad=np.inf)        # 0.87
# xps += EnKF_N (N=20,rot=True)                                 # 0.87
# xps += RHF    (N=50,infl=1.10)                                # 1.28
# xps += RHF    (N=50,infl=0.95,rot=True)                       # 0.94
# xps += RHF    (N=20,infl=0.95,rot=True)                       # 1.07
| [
"patrick.n.raanes@gmail.com"
] | patrick.n.raanes@gmail.com |
086b12d7a882e9d4d5550998cef39d299a4472d5 | 929e65367e6cd2115990456b017c16938e1012b1 | /Import package.py | 0dade4b7680e697e7bafb0ac74f5aeeb6100d2e7 | [] | no_license | abhiwer/Introduction-to-Python | 261437a5ec5929fef92f467d75347d1c1c8aae2d | 9fed8cb117c441659196a901fad9ea93f59deeb5 | refs/heads/master | 2022-11-29T23:56:11.828440 | 2020-08-08T17:32:39 | 2020-08-08T17:32:39 | 286,092,757 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 934 | py |
'''
Import package
As a data scientist, some notions of geometry never hurt. Let's refresh some of the basics.
For a fancy clustering algorithm, you want to find the circumference, C, and area, A, of a circle. When the radius of the circle is r, you can calculate C and A as:
C=2πr
A=πr2
To use the constant pi, you'll need the math package. A variable r is already coded in the script. Fill in the code to calculate C and A and see how the print() functions create some nice printouts.
Instructions
100 XP
Import the math package. Now you can access the constant pi with math.pi.
Calculate the circumference of the circle and store it in C.
Calculate the area of the circle and store it in A.
Take Hint (-30 XP)
'''
import math

# Radius of the circle under study.
r = 0.43

# Circumference C = 2*pi*r and area A = pi*r^2.
C = 2 * math.pi * r
A = math.pi * r * r

# Report both results.
print(f"Circumference: {C}")
print(f"Area: {A}")
"rajanpan97@gmail.com"
] | rajanpan97@gmail.com |
eaf64dbae77589cd3f1c16332b15cd409f93a1db | 8b53a8b9803d92003f3a3a9e1b08def7642ba35d | /TALLERES/TAL7_FUNCIONES_20210316_cur/ej23_funcionesreturn_listapositnegat.py | cc1e48bcad49794b734bf0271aba9bb904e9bb83 | [] | no_license | smarulan613/fundamentos_prog_20211_sebasmc | 637cdf9e1f61de0f876fe74530df4e6a5b40d6a6 | 0a87d81dae2bd5656a3e6a521585da661efe6cf6 | refs/heads/main | 2023-05-02T04:26:47.035698 | 2021-05-27T03:37:05 | 2021-05-27T03:37:05 | 356,059,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,154 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 22 15:14:20 2021
@author: Sebastian Marulanda Correa
Ejercicio 23 curso. funciones Python
Confeccionar un programa que permita:
1) Cargar una lista de 10 elementos enteros.
2) Generar dos listas a partir de la primera. En una guardar los valores
positivos y en otra los negativos.
3) Imprimir las dos listas generadas.
"""
def cargar():
    """Read 10 integers from the keyboard and return them as a list."""
    # One prompt per element, ten elements total.
    return [int(input("Ingrese valor:")) for _ in range(10)]
def generar_listas(lista):
    """Split *lista* into negatives and positives (zeros are discarded).

    Returns a two-element list ``[negatives, positives]``, each preserving
    the relative order of the original values.
    """
    negativos = [valor for valor in lista if valor < 0]
    positivos = [valor for valor in lista if valor > 0]
    return [negativos, positivos]
def imprimir(lista):
    """Print each element of *lista* on its own line."""
    for elemento in lista:
        print(elemento)
# Main program: read 10 values, split them by sign, and print each group.
lista=cargar()
listanega,listaposi=generar_listas(lista)
print("Lista con los valores negativos")
imprimir(listanega)
print("Lista con los valores positivos")
imprimir(listaposi)
"noreply@github.com"
] | smarulan613.noreply@github.com |
86b7939905609e12215640e9f40f41369636f45e | 8b86009da06c2ac987e2a02dcbce15ebeb0e559c | /hyperanalysis/decomposition/base.py | 0c14e91c87b74085fb26add1c471d1db2404d2f4 | [
"MIT"
] | permissive | jiangqn/hyperanalysis-archive | df1a7373f066f8a572242dcd5ff39f1bbdd53f0a | 1e68d4a8674a2fbe7ae7566d1abd3881167039e5 | refs/heads/main | 2023-03-05T11:19:59.907392 | 2021-02-23T05:33:50 | 2021-02-23T05:33:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,362 | py | import torch
from typing import Tuple
class Decomposition(object):
    """Base class of dimension reduction models.

    Stores the target dimensionality and a trained/untrained flag; concrete
    subclasses implement the actual fitting and transformation logic.
    (Removed a stale block of commented-out method stubs that duplicated the
    subclasses' NotImplementedError hooks.)
    """
    def __init__(self, n_components: int) -> None:
        super(Decomposition, self).__init__()
        self._n_components = n_components  # target output dimensionality
        self._is_trained = False  # flipped to True by subclasses after fitting

    @property
    def n_components(self) -> int:
        """int: The target number of output dimensions."""
        return self._n_components

    @property
    def is_trained(self) -> bool:
        """bool: Whether the model has been fitted."""
        return self._is_trained
class UnsupervisedDecomposition(Decomposition):
    """Base class of unsupervised dimension reduction models.

    Subclasses override :meth:`_fit` and :meth:`_transform`; the public
    ``fit`` / ``transform`` / ``fit_transform`` wrappers add input validation
    and maintain the trained flag.
    """

    def __init__(self, n_components: int) -> None:
        super(UnsupervisedDecomposition, self).__init__(n_components)

    def fit(self, X: torch.Tensor) -> None:
        """Fit the model on ``X``, a FloatTensor of shape (num, dim)."""
        self._validate_inputs(X)
        self._fit(X)
        self._is_trained = True

    def transform(self, X: torch.Tensor) -> torch.Tensor:
        """Project ``X`` with the already-fitted model."""
        self._validate_inputs(X)
        assert self.is_trained
        return self._transform(X)

    def fit_transform(self, X: torch.Tensor) -> torch.Tensor:
        """Fit on ``X`` and return its projection in a single call.

        :param X: FloatTensor (num, dim)
        """
        self.fit(X)
        return self._transform(X)

    def _fit(self, X: torch.Tensor) -> None:
        """Subclass hook: learn model parameters from ``X`` (num, dim)."""
        raise NotImplementedError("The _fit method is not implemented in the UnsupervisedDecomposition class.")

    def _transform(self, X: torch.Tensor) -> torch.Tensor:
        """Subclass hook: project ``X`` (num, dim) to (num, n_components)."""
        raise NotImplementedError("The _transform method is not implemented in the UnsupervisedDecomposition class.")

    def _validate_inputs(self, X: torch.Tensor) -> None:
        """Check that ``X`` is a rank-2 torch tensor."""
        assert isinstance(X, torch.Tensor), "The type of input X is wrong."
        assert X.dim() == 2, "This size of input X is wrong."
class SupervisedDecomposition(Decomposition):
    """Base class of supervised dimension reduction models.

    Subclasses override :meth:`_fit` and :meth:`_transform`; the public
    wrappers add input validation and maintain the trained flag.
    """

    def __init__(self, n_components: int) -> None:
        super(SupervisedDecomposition, self).__init__(n_components)

    def fit(self, X: torch.Tensor, y: torch.Tensor) -> None:
        """Fit on features ``X`` (num, dim) and targets ``y`` (num,)."""
        self._validate_inputs(X, y)
        self._fit(X, y)
        self._is_trained = True

    def transform(self, X: torch.Tensor) -> torch.Tensor:
        """Project ``X`` with the already-fitted model."""
        self._validate_inputs(X)
        assert self.is_trained
        return self._transform(X)

    def fit_transform(self, X: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        """Fit on ``(X, y)`` and return the projection of ``X``."""
        self.fit(X, y)
        return self._transform(X)

    def _fit(self, X: torch.Tensor, y: torch.Tensor) -> None:
        """Subclass hook: learn parameters from ``X`` (num, dim) and ``y`` (num,)."""
        raise NotImplementedError("The _fit method is not implemented in the SupervisedDecomposition class.")

    def _transform(self, X: torch.Tensor) -> torch.Tensor:
        """Subclass hook: project ``X`` (num, dim) to (num, n_components)."""
        raise NotImplementedError("The _transform method is not implemented in the SupervisedDecomposition class.")

    def _validate_inputs(self, X: torch.Tensor, y: torch.Tensor = None) -> None:
        """Check ``X`` is rank-2 and, when given, ``y`` is rank-1 with matching length."""
        assert isinstance(X, torch.Tensor), "The type of input X is wrong."
        assert X.dim() == 2, "This size of input X is wrong."
        if y is None:
            return
        assert isinstance(y, torch.Tensor), "The type of input y is wrong."
        assert y.dim() == 1, "This size of input y is wrong."
        assert X.size(0) == y.size(0), "The num of X is not equal to y."
class CrossDecomposition(Decomposition):
    """Base class of cross (two-view) dimension reduction models.

    Subclasses override :meth:`_fit` and :meth:`_transform`; the public
    wrappers add input validation and maintain the trained flag.
    """

    def __init__(self, n_components: int) -> None:
        super(CrossDecomposition, self).__init__(n_components)

    def fit(self, X: torch.Tensor, Y: torch.Tensor) -> None:
        """Fit on paired views ``X`` (num, dim1) and ``Y`` (num, dim2)."""
        self._validate_inputs(X, Y)
        self._fit(X, Y)
        self._is_trained = True

    def transform(self, X: torch.Tensor, Y: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Project both views with the already-fitted model."""
        self._validate_inputs(X, Y)
        assert self.is_trained
        return self._transform(X, Y)

    def fit_transform(self, X: torch.Tensor, Y: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Fit on ``(X, Y)`` and return both projections in a single call."""
        self.fit(X, Y)
        return self._transform(X, Y)

    def _fit(self, X: torch.Tensor, Y: torch.Tensor) -> None:
        """Subclass hook: learn parameters from the paired views."""
        raise NotImplementedError("The _fit method is not implemented in the CrossDecomposition class.")

    def _transform(self, X: torch.Tensor, Y: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Subclass hook: return ``(X', Y')``, each of shape (num, n_components)."""
        raise NotImplementedError("The _transform method is not implemented in the CrossDecomposition class.")

    def _validate_inputs(self, X: torch.Tensor, Y: torch.Tensor) -> None:
        """Check both inputs are rank-2 tensors with matching row counts."""
        assert isinstance(X, torch.Tensor), "The type of input X is wrong."
        assert X.dim() == 2, "This size of input X is wrong."
        assert isinstance(Y, torch.Tensor), "The type of input Y is wrong."
        assert Y.dim() == 2, "This size of input Y is wrong."
        assert X.size(0) == Y.size(0), "The num of X is not equal to Y."
"1935448858@qq.com"
] | 1935448858@qq.com |
f5e5f08367caa58f682b03c02fa22c8c90e2a22a | 6929a33a7259dad9b45192ca088a492085ed2953 | /solutions/0283-move-zeroes/move-zeroes.py | af0c715153e20b1875b77015c630bf46ec33870e | [] | no_license | moqi112358/leetcode | 70366d29c474d19c43180fd4c282cc02c890af03 | fab9433ff7f66d00023e3af271cf309b2d481722 | refs/heads/master | 2022-12-10T01:46:14.799231 | 2021-01-14T05:00:09 | 2021-01-14T05:00:09 | 218,163,960 | 3 | 0 | null | 2022-07-06T20:26:38 | 2019-10-28T23:26:47 | Python | UTF-8 | Python | false | false | 778 | py | # Given an array nums, write a function to move all 0's to the end of it while maintaining the relative order of the non-zero elements.
#
# Example:
#
#
# Input: [0,1,0,3,12]
# Output: [1,3,12,0,0]
#
# Note:
#
#
# You must do this in-place without making a copy of the array.
# Minimize the total number of operations.
#
class Solution:
    def moveZeroes(self, nums: List[int]) -> None:
        """
        Move every zero in ``nums`` to the end, in place, keeping the
        relative order of the non-zero elements. Returns nothing.
        """
        write = 0  # next slot for a non-zero element
        for read in range(len(nums)):
            if nums[read] != 0:
                # Swap the non-zero element forward into the write slot;
                # any displaced value is a zero (or the element itself).
                nums[write], nums[read] = nums[read], nums[write]
                write += 1
| [
"983028670@qq.com"
] | 983028670@qq.com |
830d9c3bc9d6971c814cf9ad5c519397f7b4e369 | baff68c47362e9911b415e2d68b470a33da968ae | /src/add_metadata.py | 154521b3f22a398fb84578e0be287c216430a487 | [] | no_license | Assimila/ClimateRiskDisclosure | 6eacb8f91780d3b7e43c1b81b402fe632d112053 | 0a0bc23e8a117bbf23f05277043ea9ea70406265 | refs/heads/main | 2022-11-24T08:31:06.562223 | 2022-11-02T17:37:59 | 2022-11-02T17:37:59 | 243,382,947 | 3 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,901 | py |
import os
import sys
import gdal
from osgeo import gdal_array
import numpy as np
import datetime
# Name of the climate variable to process, taken from the command line.
variable = sys.argv[1]
#fname = f'../ERA5/Europe/{variable}/Europe_monthly_mean_{variable}_1979_2019.tif'
fname = f'../ERA5/Europe/{variable}/Europe_monthly_mean_{variable}_2002_2019.tif'
# Read the whole multi-band stack into memory.
d = gdal.Open(fname)
data = d.ReadAsArray()
# Get datadir
datadir = os.path.dirname(fname)
bands, rows, cols = data.shape
driver = gdal.GetDriverByName('GTiff')
driver_options = ["COMPRESS=DEFLATE",
                  "BIGTIFF=YES",
                  "PREDICTOR=1",
                  "TILED=YES",
                  "BLOCKXSIZE=256",
                  "BLOCKYSIZE=256",
                  "INTERLEAVE=BAND"]
# Get projection and geotransform
proj = d.GetProjection()
gt = d.GetGeoTransform()
# Get GDAL datatype from NumPy datatype
dtype = gdal_array.NumericTypeCodeToGDALTypeCode(data.dtype)
# Create the output dataset next to the input, named <variable>.tif.
fname = os.path.join(datadir, f'{variable}.tif')
dst_ds = driver.Create(fname, cols, rows, bands, dtype, driver_options)
# Set cartographic projection
dst_ds.SetProjection(proj)
dst_ds.SetGeoTransform(gt)
# One first-of-month date per band, from start (inclusive) to end (inclusive).
#startyear = 1979
#startmonth = 1
startyear = 2002
startmonth = 7
endyear = 2019
endmonth = 12
dates = [datetime.date(m//12, m%12+1, 1) for m in range(startyear*12+startmonth-1, endyear*12+endmonth)]
if len(dates) != bands:
    # BUG FIX: raising a plain string is a TypeError in Python 3 (exceptions
    # must derive from BaseException); raise a proper exception instead.
    raise ValueError("Inconsistent number of bands for date range")
for i in range(bands):
    # Write each band and tag it with the date it represents.
    dst_ds.GetRasterBand(i+1).WriteArray(data[i])
    dst_band = dst_ds.GetRasterBand(i+1)
    dst_band.SetMetadataItem('RANGEBEGINNINGDATE', dates[i].strftime("%Y-%m-%d"))
# De-referencing flushes and closes the GDAL dataset.
dst_ds = None
| [
"gerardo.lopezsaldana@assimila.eu"
] | gerardo.lopezsaldana@assimila.eu |
432a7901e1e5361f33fedadd22fc71f1071a8b47 | 0db19410e9751790af8ce4a0a9332293e379c02f | /mmpose/models/data_preprocessors/__init__.py | 7c9bd22e2b20be84a17d05ab3058efd8d934f261 | [
"Apache-2.0"
] | permissive | open-mmlab/mmpose | 2c9986521d35eee35d822fb255e8e68486026d94 | 537bd8e543ab463fb55120d5caaa1ae22d6aaf06 | refs/heads/main | 2023-08-30T19:44:21.349410 | 2023-07-04T13:18:22 | 2023-07-04T13:18:22 | 278,003,645 | 4,037 | 1,171 | Apache-2.0 | 2023-09-14T09:44:55 | 2020-07-08T06:02:55 | Python | UTF-8 | Python | false | false | 136 | py | # Copyright (c) OpenMMLab. All rights reserved.
from .data_preprocessor import PoseDataPreprocessor
__all__ = ['PoseDataPreprocessor']
| [
"noreply@github.com"
] | open-mmlab.noreply@github.com |
8f9924d0cca9605376bdd8571df236762324976f | 4b896d1e582f2b18753d63221e458fb977bcab3a | /go/debug/objsize.py | c7d50bec22f3ac3f0c02190fe3bafc02f9907f5c | [] | no_license | klangner/deep-learning-go | 1589b86bccdfa229b0e947af2145dc3a48b9e939 | b8fc8062eac2d608161d8b52dae7d0aeaa0dbf0b | refs/heads/master | 2020-04-19T07:10:27.525346 | 2019-11-06T21:40:09 | 2019-11-06T21:40:09 | 168,039,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,842 | py | from __future__ import print_function
from sys import getsizeof, stderr
from itertools import chain
from collections import deque
try:
from reprlib import repr
except ImportError:
pass
def total_size(o, handlers={}, verbose=False):
    """Return the approximate memory footprint of *o* and everything it contains.

    The contents of the builtin containers tuple, list, deque, dict, set and
    frozenset (and their subclasses) are found automatically.  To recurse into
    other containers, supply *handlers* mapping a container class to a function
    that yields its contents, e.g.::

        handlers = {SomeContainerClass: iter,
                    OtherContainerClass: OtherContainerClass.get_elements}
    """
    all_handlers = {
        tuple: iter,
        list: iter,
        deque: iter,
        dict: lambda d: chain.from_iterable(d.items()),
        set: iter,
        frozenset: iter,
    }
    all_handlers.update(handlers)    # user handlers take precedence
    seen = set()                     # object ids already counted
    default_size = getsizeof(0)     # fallback for objects without __sizeof__

    def _sizeof(obj):
        # Shared objects are counted only once.
        if id(obj) in seen:
            return 0
        seen.add(id(obj))
        size = getsizeof(obj, default_size)
        if verbose:
            print(size, type(obj), repr(obj), file=stderr)
        # First matching handler wins; recurse into the container's contents.
        for container_type, get_contents in all_handlers.items():
            if isinstance(obj, container_type):
                size += sum(map(_sizeof, get_contents(obj)))
                break
        return size

    return _sizeof(o)
##### Example call: quick smoke test exercising the dict and list handlers #####
if __name__ == '__main__':
    d = dict(a=1, b=2, c=3, d=[4,5,6,7], e='a string of chars')
    print('Total size: {} KB'.format(total_size(d, verbose=True) // 1024))
"klangner@gmail.com"
] | klangner@gmail.com |
88540f3f59978d61c2a4d2f65b37a0348c555d12 | 1825283527f5a479204708feeaf55f4ab6d1290b | /leetcode/python/252/sol.py | 9f045bf520410fe87ee096f43d4c9c6bcba54931 | [] | no_license | frankieliu/problems | b82c61d3328ffcc1da2cbc95712563355f5d44b5 | 911c6622448a4be041834bcab25051dd0f9209b2 | refs/heads/master | 2023-01-06T14:41:58.044871 | 2019-11-24T03:47:22 | 2019-11-24T03:47:22 | 115,065,956 | 1 | 0 | null | 2023-01-04T07:25:52 | 2017-12-22T02:06:57 | HTML | UTF-8 | Python | false | false | 401 | py |
My Python Solution
https://leetcode.com/problems/meeting-rooms/discuss/67812
* Lang: python3
* Author: yinfeng.zhang.9
* Votes: 21
def canAttendMeetings(self, intervals):
    """Return True iff no two meetings in *intervals* overlap.

    Sorts the meetings by start time (in place), then checks that every
    meeting starts no earlier than the previous one ends.
    """
    intervals.sort(key=lambda interval: interval.start)
    return all(prev.end <= cur.start
               for prev, cur in zip(intervals, intervals[1:]))
| [
"frankie.y.liu@gmail.com"
] | frankie.y.liu@gmail.com |
7eb01a752b3791f19bcdfa84bf02cf02c25aedef | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03416/s958545838.py | b462b8076901011891d089895dfad5d7ccd0140c | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | A, B = map(int, input().split())
count = 0
for i in range(A, B+1):
num = str(i)
if num[0] == num[4]:
if num[1] == num[3]:
count += 1
print(count) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
fda7809b36d2aad19a3c58c0e669438ed43d6868 | 07c61596c1fba2e2a7034fe5af9707794ea2e2c1 | /Hackerrank/ProjectEuler/99.py3 | 2de3e1198f2cc5dd0ec84038640c552bb9ea446d | [] | no_license | H-Shen/Collection_of_my_coding_practice | 2fcb2f8fef9451ad4a3a9c063bbf6a34ea5966b4 | 6415552d38a756c9c89de0c774799654c73073a6 | refs/heads/master | 2023-08-24T21:19:08.886667 | 2023-08-22T03:47:39 | 2023-08-22T03:47:39 | 180,731,825 | 8 | 1 | null | 2021-08-13T18:25:25 | 2019-04-11T06:48:09 | C++ | UTF-8 | Python | false | false | 258 | py3 | from math import log
import heapq
# Pairs (a, b); the sort key b*log(a) orders the values a**b without ever
# computing the huge powers themselves.
A = []
n = int(input())
for i in range(n):
    a, b = list(map(int, input().split()))
    A.append([b * log(a), a, b])
k = int(input())
# k-th smallest a**b: take the k smallest keys and keep the last of them.
result = heapq.nsmallest(k, A, key = lambda x : x[0])[-1]
print(result[1], result[2])
| [
"haohu.shen@ucalgary.ca"
] | haohu.shen@ucalgary.ca |
f09b6648d2399c48d65cc7224e21b179ef864204 | 69a7257eae464b4598183eca15b324307fedadca | /parse_all.py | 6124fe630ae60d48fdeea63eca5b0a4f922da0df | [] | no_license | chenc10/SparkTrace | b1de2848f8eec44a9a0296c53ef97726d7a26d68 | 1e77dd334e8c52d26ab4991de222e1b2d573719b | refs/heads/master | 2016-09-14T11:43:31.928980 | 2016-05-16T01:06:03 | 2016-05-16T01:06:03 | 58,176,685 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | import os
import sys
import parse_logs
def main(argv):
    """Parse every file ending in "job_log" under the directory argv[0].

    Each matching file is handed to parse_logs.parse together with the
    <dirname>/agg_results path (presumably the aggregate output file --
    confirm in parse_logs).  Python 2 script: uses print statements.
    """
    dirname = argv[0]
    print "Parsing files in ", dirname
    agg_results_filename = os.path.join(dirname, "agg_results")
    for filename in os.listdir(argv[0]):
        full_name = os.path.join(dirname, filename)
        if os.path.isfile(full_name) and filename.endswith("job_log"):
            print "Parsing file ", full_name
            parse_logs.parse(full_name, agg_results_filename)
if __name__ == "__main__":
    # First command-line argument is the target directory.
    main(sys.argv[1:])
| [
"chenc10@126.com"
] | chenc10@126.com |
8a0590a6380487f11389d2d2e063ca5783a0bc38 | dc0b6b680fd1fc0ab86ed7a3460137cde3a8612d | /Meus códigos/Python/Economia/Antigos/importa_csv_tse_1b.py | 1be2bda35b65a06a2e53b139401c7268b730c330 | [] | no_license | pedromfnakashima/codigos_versionados | 6c8c692bc08a0dda39a82bf91c5245f28d9be330 | c40c94d69f1ee3dd4317786f1c25bcc1bbcc2bb9 | refs/heads/main | 2023-03-21T20:32:53.677701 | 2021-03-20T00:03:10 | 2021-03-20T00:03:10 | 305,754,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,533 | py | import pandas as pd
class importa():
    """Loader for the 2018 TSE candidate CSV dump (state MS).

    ``caminho_completo`` is built once, at class definition time, from the
    hard-coded folder and file names below.
    """
    pasta = r'C:\Users\pedro\bd\TEMÁTICO\TSE\consulta_cand_2018'
    arquivo = r'\consulta_cand_2018_MS.csv'
    caminho_completo = (pasta + arquivo).replace("\\", "/")
    def importa_csv(self, lista):
        """Read the configured CSV keeping only the columns listed in *lista*."""
        df = pd.read_csv(self.caminho_completo,
                         usecols = lista,
                         encoding = 'latin',
                         delimiter = ';')
        return df
class tse_colunas():
lista1 = [
'ANO_ELEICAO',
'NR_TURNO',
'DT_ELEICAO',
'DS_CARGO',
'NM_CANDIDATO',
'NM_URNA_CANDIDATO',
'NR_CPF_CANDIDATO',
'NM_EMAIL',
'TP_AGREMIACAO',
'NR_PARTIDO',
'SG_PARTIDO',
'NM_PARTIDO',
'NM_COLIGACAO',
'DS_COMPOSICAO_COLIGACAO',
'SG_UF_NASCIMENTO',
'NM_MUNICIPIO_NASCIMENTO',
'DT_NASCIMENTO',
'DS_GENERO',
'DS_GRAU_INSTRUCAO',
'DS_ESTADO_CIVIL',
'DS_COR_RACA',
'DS_OCUPACAO',
'DS_SIT_TOT_TURNO',
'ST_REELEICAO'
]
lista2 = [
'ANO_ELEICAO',
'NR_TURNO',
'DT_ELEICAO'
]
del lista1, lista2
df_tse_1 = pd.read_csv(caminho_completo,
usecols = lista1,
encoding = 'latin',
delimiter = ';')
df_tse_2 = pd.read_csv(caminho_completo,
usecols = lista2,
encoding = 'latin',
delimiter = ';')
df_tse_3 = pd.read_csv(caminho_completo,
usecols = lambda x: x not in lista1,
encoding = 'latin',
delimiter = ';')
del df_tse_1
##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### #####
##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### #####
##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### #####
import pandas as pd
from pathlib import Path
caminho = Path(r'C:\Users\pedro\bd\TEMÁTICO\TSE\consulta_cand_2018\consulta_cand_2018_MS.csv')
caminho
#f1 = lambda x: pd.to_numeric(x.replace("R$ ","").replace(",","."), errors="coerce")
converte_data = lambda x: pd.to_datetime(x, format='%d/%m/%Y', errors='coerce')
df_tse_1 = pd.read_csv(caminho,
usecols = tse_colunas.lista1,
encoding = 'latin',
dtype = {'DS_CARGO': 'category',
'TP_AGREMIACAO': 'category',
'NM_PARTIDO': 'category',
'NM_COLIGACAO': 'category',
'DS_COMPOSICAO_COLIGACAO': 'category',
'SG_UF_NASCIMENTO': 'category',
'NM_MUNICIPIO_NASCIMENTO': 'category',
'DS_GENERO': 'category',
'DS_GRAU_INSTRUCAO': 'category',
'DS_ESTADO_CIVIL': 'category',
'DS_COR_RACA': 'category',
'DS_OCUPACAO': 'category',
'DS_SIT_TOT_TURNO': 'category'},
delimiter = ';',
converters={"DT_NASCIMENTO": converte_data},
true_values=["S"],
false_values=["N"],
engine = "python")
# Funcionou com o Path!
df_tse_1.dtypes
print(df_tse_1.head())
########################################################
df_tse_1 = pd.read_csv(caminho,
usecols = tse_colunas.lista1,
encoding = 'latin',
delimiter = ';',
converters={"DT_NASCIMENTO": converte_data},
true_values=["S"],
false_values=["N"],
engine = "python")
# Funcionou com o Path!
df_tse_1.dtypes
print(df_tse_1.head())
##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### #####
##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### #####
##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### #####
## Filtros
df_tse_1.loc[df_tse_1['NM_CANDIDATO'].str.contains("PICARELLI")]
resultado1 = df_tse_1.loc[df_tse_1['NM_URNA_CANDIDATO']
.str.contains("BARBOSINHA"),
['NM_CANDIDATO', 'NM_URNA_CANDIDATO']]
resultado1 = df_tse_1.loc[df_tse_1['DS_CARGO'].str.contains("ESTADUAL"),
['DS_CARGO', 'NM_URNA_CANDIDATO', 'SG_PARTIDO', 'DT_NASCIMENTO', 'DS_GENERO', 'DS_GRAU_INSTRUCAO', 'DS_ESTADO_CIVIL', 'DS_SIT_TOT_TURNO']]
resultado1 = df_tse_1.loc[(df_tse_1['DS_CARGO'].str.contains("ESTADUAL"),
df_tse_1['DS_SIT_TOT_TURNO'] == 'ELEITO'),
['DS_CARGO', 'NM_URNA_CANDIDATO', 'SG_PARTIDO', 'DT_NASCIMENTO', 'DS_GENERO', 'DS_GRAU_INSTRUCAO', 'DS_ESTADO_CIVIL', 'DS_SIT_TOT_TURNO']]
resultado1 = df_tse_1.loc[:,['DS_CARGO', 'NM_URNA_CANDIDATO']]
resultado2 = df_tse_1.loc[:,['DS_CARGO', 'NM_URNA_CANDIDATO', 'SG_PARTIDO', 'DT_NASCIMENTO', 'DS_GENERO', 'DS_GRAU_INSTRUCAO', 'DS_ESTADO_CIVIL', 'DS_SIT_TOT_TURNO']]\
.loc[df_tse_1['DS_CARGO'].str.contains("ESTADUAL")]\
.loc[df_tse_1['DS_SIT_TOT_TURNO'] == 'ELEITO']
##########################################
resultado1 = df_tse_1.loc[:, ['DS_CARGO',
'NM_URNA_CANDIDATO',
'SG_PARTIDO',
'DT_NASCIMENTO',
'DS_GENERO',
'DS_GRAU_INSTRUCAO',
'DS_ESTADO_CIVIL',
'DS_SIT_TOT_TURNO']]\
.loc[df_tse_1['DS_CARGO'].str.contains("ESTADUAL")]\
.loc[df_tse_1['DS_SIT_TOT_TURNO'] == 'SUPLENTE']
##########################################
resultado1 = df_tse_1.loc[:, ['NM_URNA_CANDIDATO',
'SG_PARTIDO',
'DT_NASCIMENTO',
'DS_GENERO',
'DS_GRAU_INSTRUCAO',
'DS_ESTADO_CIVIL',
'DS_SIT_TOT_TURNO']]\
.loc[df_tse_1['DS_CARGO'].str.contains('ESTADUAL')]\
.loc[df_tse_1['DS_SIT_TOT_TURNO'].str.contains('^ELEITO', regex=True)]
len(resultado1)
##########################################
| [
"pedromfnakashima@gmail.com"
] | pedromfnakashima@gmail.com |
437895ed7aaef43911f95aafcd5cb599969661f1 | 5173c3e3956387a3f2ae8fcf4aed7c7a600dac78 | /SWEA/SWEA_5185_이진수.py | 3035925afbd3f73521eb396dd724f1fe9f88dd20 | [] | no_license | ma0723/Min_Algorithm | df75f53f6e89b7817d4b52d686effb8236a4ddac | b02d1043008cb32e22daa9d4207b9a45f111d66f | refs/heads/master | 2023-07-25T11:00:15.397093 | 2021-08-30T02:08:05 | 2021-08-30T02:08:05 | 375,613,927 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,185 | py | import sys
sys.stdin = open("5185.txt", "r")
# Hexadecimal digits, indexed by their decimal value 0..15.
hex_lst = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F']
T = int(input())
for tc in range(1, T+1):
    print("#{}".format(tc), end= ' ')
    N, hex = input().split()
    # N-digit hexadecimal number
    dec = []
    # Convert each hex digit to decimal (its index in hex_lst).
    for i in hex:
        for j in range(16):
            if hex_lst[j] == i:
                dec.append(j)
    # print(dec)
    # Convert each decimal value to binary.
    for i in dec:
        ans = ''
        while i > 0:
            ans = str(i % 2) + ans
            # remainder (prepended, since digits come out in reverse order)
            i //= 2
            # quotient
        # Each hex digit must be printed as a 4-digit binary group,
        # including any leading zeros.
        if len(ans) != 4:
            my_ans = '0'*(4-len(ans)) + ans
            # pad the front with zeros when fewer than 4 digits were produced
        else:
            my_ans = ans
            # already exactly 4 digits
        print(my_ans, end='')
        # print the 4-digit groups without separators
    print()
    # newline before the next test case
| [
"ma0723@naver.com"
] | ma0723@naver.com |
911f9d2ca22cfaffd45aa32eed0a648b30ff9cec | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/rna-transcription/8117955873ba4f469f73ce4e2a833897.py | 930b205143cc064eaf9cd0425dad20fa99fdb7c2 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 136 | py | dict_map = {
'A': 'U',
'G': 'C',
'T': 'A',
'C': 'G'
}
def to_rna(dna):
return ''.join([dict_map[x] for x in dna])
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
f074af7028ccd82b4eb009889b16d1dabc110c69 | 38c10c01007624cd2056884f25e0d6ab85442194 | /remoting/host/DEPS | 3abc8545dbe42bf8933ae6066acb8037b5c824d2 | [
"BSD-3-Clause"
] | permissive | zenoalbisser/chromium | 6ecf37b6c030c84f1b26282bc4ef95769c62a9b2 | e71f21b9b4b9b839f5093301974a45545dad2691 | refs/heads/master | 2022-12-25T14:23:18.568575 | 2016-07-14T21:49:52 | 2016-07-23T08:02:51 | 63,980,627 | 0 | 2 | BSD-3-Clause | 2022-12-12T12:43:41 | 2016-07-22T20:14:04 | null | UTF-8 | Python | false | false | 350 | include_rules = [
"+ash",
"+cc/output",
"+components/policy/core/common",
"+extensions/browser/api/messaging",
"+jingle/glue",
"+net",
"+remoting/codec",
"+remoting/protocol",
"+remoting/signaling",
"+remoting/tools",
"+third_party/jsoncpp",
"+third_party/modp_b64",
"+third_party/skia",
"+third_party/webrtc",
"+ui",
]
| [
"zeno.albisser@hemispherian.com"
] | zeno.albisser@hemispherian.com | |
480d52454e03c72cf93ca575ed707fe89e0b6db4 | e52b0124ad5f875ea16a10cc8aa5e771f5d7c3ea | /guniflask/security/web_authentication_details.py | 5375b3912e675d9a30289b0ae702bc9a83433405 | [
"MIT"
] | permissive | jadbin/guniflask | 24ec0c755827fe15ebbfeaec3149882ac6bc79b9 | f0f5029d03219b7793482dc3ed09eab508e538d6 | refs/heads/master | 2023-08-18T07:56:36.331037 | 2023-08-09T02:48:23 | 2023-08-09T02:48:23 | 147,511,047 | 14 | 2 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | from flask import request
class WebAuthenticationDetails:
    """Snapshot of HTTP request details captured at construction time."""
    def __init__(self):
        # Requires an active Flask request context; records the client IP.
        self.remote_address = request.remote_addr
| [
"jadbin.com@hotmail.com"
] | jadbin.com@hotmail.com |
f670c6e3e5f42ee3819ef8f36431f55282ea2c60 | 8c917dc4810e2dddf7d3902146280a67412c65ea | /v_7/Dongola/common/hr_custom/report/transfer_report_1.py | ff5c7f89843d579d6b10234303ef5e47105ca182 | [] | no_license | musabahmed/baba | d0906e03c1bbd222d3950f521533f3874434b993 | 0b997095c260d58b026440967fea3a202bef7efb | refs/heads/master | 2021-10-09T02:37:32.458269 | 2018-12-20T06:00:00 | 2018-12-20T06:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,597 | py | import time
import pooler
#import rml_parse
import copy
from report import report_sxw
import pdb
import re
class transfer_report_1(report_sxw.rml_parse):
    """RML parser for the employee transfer-history report (hr.process.archive)."""
    _name = 'report.transfer.report.1'

    def __init__(self, cr, uid, name, context):
        super(transfer_report_1, self).__init__(cr, uid, name, context=context)
        # Names exposed to the RML template.
        self.localcontext.update({
            'time': time,
            '_get_emp': self._get_emp,
        })

    def _get_emp(self, ids):
        """Return the transfer rows for one hr.process.archive record.

        ``ids`` is a single archive id.  Each returned dict carries
        company, department, transfer date and employee name, ordered
        by transfer date.
        """
        archive_obj = pooler.get_pool(self.cr.dbname).get('hr.process.archive')
        archive = archive_obj.browse(self.cr, self.uid, [ids])[0]
        emp = archive.employee_id.id
        comp = archive.company_id.id
        # Parameterized query: the previous '%'-string interpolation was
        # vulnerable to SQL injection and fragile for unexpected values.
        self.cr.execute(
            'SELECT c.name AS company, d.name AS department,'
            ' ar.transfer_date AS date, r.name AS employee_name'
            ' FROM hr2_basic_transfer_archive AS ar'
            ' LEFT JOIN hr_employee AS e ON (ar.employee_id=e.id)'
            ' LEFT JOIN resource_resource AS r ON (e.resource_id=r.id)'
            ' LEFT JOIN hr_department AS d ON (ar.department_id=d.id)'
            ' LEFT JOIN res_company AS c ON (ar.company_id=c.id)'
            ' WHERE r.id=%s AND c.id=%s ORDER BY ar.transfer_date',
            (emp, comp))
        return self.cr.dictfetchall()


report_sxw.report_sxw('report.transfer.report.1', 'hr.process.archive',
    'addons/hr_process/report/transfer_report_1.rml', parser=transfer_report_1, header=True)
| [
"bakry@exp-sa.com"
] | bakry@exp-sa.com |
9ee83697c077bff9c366dc651065957a5dcfd94c | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_steering.py | 9d1b8159a6d286fa6567d17d84d7a4e429ff66b2 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py |
#calss header
class _STEERING():
def __init__(self,):
self.name = "STEERING"
self.definitions = steer
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['steer']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
a3548216505b1663d119fa33ba99a302992ca5a7 | 258fffc9b1fda6ed152d3520facdcba54d5460d1 | /manga_py/providers/readmanga_me.py | feada82b8ce302fe0689a757f8e92de2a2386776 | [
"MIT"
] | permissive | theincognito-inc/manga-dl | a29422b8417a398e6b0273ae6f2374f3f4401021 | 899905bafb6c6891815b58cce41eaff32a682570 | refs/heads/stable_1.x | 2021-07-09T10:45:18.197767 | 2020-07-20T11:21:18 | 2020-07-20T11:21:18 | 161,990,334 | 0 | 0 | MIT | 2020-07-20T11:21:20 | 2018-12-16T10:31:37 | Python | UTF-8 | Python | false | false | 1,360 | py | from manga_py.provider import Provider
from .helpers.std import Std
class ReadmangaMe(Provider, Std):
def get_archive_name(self) -> str:
idx = self.get_chapter_index()
vol, ch = idx.split('-')
return self.normal_arc_name({'vol': vol, 'ch': ch})
def get_chapter_index(self):
_re = r'/.+/(?:vol)?([^/]+/[^/]+)(?:/|\?ma?t)?'
name = self.re.search(_re, self.chapter).group(1)
if ~name.find('?'):
name = name[:name.find('?')]
return name.replace('/', '-')
def get_main_content(self):
return self._get_content('{}/{}?mature=1&mtr=1')
def get_manga_name(self):
return self._get_name(r'\.\w{2,7}/([^/]+)')
def get_chapters(self):
return self._elements('div.chapters-link tr > td > a')
def get_files(self):
_uri = self.http().normalize_uri(self.chapter)
content = self.http_get(_uri)
result = self.re.search(r'rm_h\.init.+?(\[\[.+\]\])', content, self.re.M)
if not result:
return []
images = self.json.loads(
result.groups()[0].replace("'", '"')
)
return [i[0] + i[2] for i in images]
def get_cover(self):
return self._cover_from_content('.picture-fotorama > img')
def book_meta(self) -> dict:
# todo meta
pass
main = ReadmangaMe
| [
"sttv-pc@mail.ru"
] | sttv-pc@mail.ru |
d76bf2a679b9464bd4006997fb666f97c9a71c0f | 4c1d9bace72c568b39ffd88b2f3c6bcc58bfe892 | /main/board.py | 23bf487563dab53661c51941f95d421bebc6f853 | [] | no_license | tanghee/PULZIP_Project_Flask_Add | a5cd05190b4cb9f280508e44b797728a6a037efe | e240a510b7ec8cb94fff1a7e3ad47b30fe1b8c2a | refs/heads/master | 2023-02-01T12:45:23.377652 | 2020-12-16T01:50:57 | 2020-12-16T01:50:57 | 321,837,801 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,668 | py | from main import *
from flask import Blueprint
blueprint = Blueprint("board", __name__, url_prefix="/board")
category = [
{"박물관소개": {
"관장 인사글": "about",
"관람 안내 및 오시는 길": "location",
"관련 기사": "news",
"로고 소개": "logo",
}},
{"풀짚공예 전시실": {
"소장유물 소개": "relic",
"상설 전시": "expatiation_exhibition",
"특별 전시": "special_exhibition",
"체험교육 전시": "experience_exhibition",
}},
{"풀짚공예 교육": {
"풀짚공예란?": "info",
"만들기 동영상": "video",
"체험학습": "field_study",
"일반&전문가 심화과정": "normal_study",
}},
{"풀짚 문화": {
"책 소개": "culture_book",
"바구니여행": "culture_basket",
"풀짚갤러리": "pulzip_gallery",
}},
{"커뮤니티": {
"공지사항": "notice",
"자유게시판": "free",
"포토갤러리": "gallery",
"체험예약": "reservation",
}},
]
@blueprint.route("/list")
def board_list():
# 페이지 값 (값이 없는 경우 기본값은 1), 리미트 값 (몇 개의 게시물을 나오게 할 것인지)
page = request.args.get("page", 1, type=int)
limit = request.args.get("limit", 10, type=int)
board_sort = request.args.get("board_sort", -1, type=int)
board = mongo.db.board
tot_count = board.find({}).count() # 게시물의 총 개수
last_page_num = math.ceil(tot_count / limit) # 마지막 페이지 수 = 전체 게시물 수 / 페이지당 게시물 수
block_size = 5
block_num = int((page - 1) / block_size) # block 현재 위치
block_start = int((block_size * block_num) + 1) # block 시작 위치
block_last = math.ceil(block_start + (block_size - 1)) # block 마지막 위치
datas = board.find({}).skip((page - 1) * limit).limit(limit).sort("pubdate", -1)
if board_sort == 0:
datas = board.find({}).skip((page - 1) * limit).limit(limit).sort("title", 1)
elif board_sort == 1:
datas = board.find({}).skip((page - 1) * limit).limit(limit).sort("title", -1)
elif board_sort == 2:
datas = board.find({}).skip((page - 1) * limit).limit(limit).sort("name", 1)
elif board_sort == 3:
datas = board.find({}).skip((page - 1) * limit).limit(limit).sort("name", -1)
elif board_sort == 4:
datas = board.find({}).skip((page - 1) * limit).limit(limit).sort("pubdate", -1)
elif board_sort == 5:
datas = board.find({}).skip((page - 1) * limit).limit(limit).sort("pubdate", 1)
elif board_sort == 6:
datas = board.find({}).skip((page - 1) * limit).limit(limit).sort("view", -1)
elif board_sort == 7:
datas = board.find({}).skip((page - 1) * limit).limit(limit).sort("view", 1)
return render_template("/board/list.html", page=page, limit=limit, board_sort=board_sort, datas=datas, tot_count=tot_count, block_start=block_start, block_last=block_last, last_page_num=last_page_num, category=category)
@blueprint.route("/view/<idx>")
@login_required
def board_view(idx):
    """Show a single post and atomically increment its view counter."""
    # idx = request.args.get("idx")
    if idx is not None:
        # Preserve paging/sort state so "back to list" returns to the same page.
        page = request.args.get("page")
        board_sort = request.args.get("board_sort")
        board = mongo.db.board
        # data = board.find_one({"_id": ObjectId(idx)})
        # find_one_and_update bumps the counter and returns the updated
        # document in a single round trip.
        data = board.find_one_and_update({"_id": ObjectId(idx)}, {"$inc": {"view": 1}}, return_document=True)
        if data is not None:
            result = {
                "id": data.get("_id"),
                "name": data.get("name"),
                "title": data.get("title"),
                "contents": data.get("contents"),
                "pubdate": data.get("pubdate"),
                "view": data.get("view"),
                "writer_id": data.get("writer_id", "")
            }
            return render_template("/board/view.html", result=result, page=page, board_sort=board_sort, category=category)
    return abort(404)
@blueprint.route('/write', methods=["GET", "POST"])
@login_required
def board_write():
    """GET: render the write form.  POST: create a post and redirect to it."""
    if request.method == "POST":
        name = request.form.get("name")
        title = request.form.get("title")
        contents = request.form.get("contents")
        # Publication timestamp stored as UTC epoch milliseconds.
        current_utc_time = round(datetime.utcnow().timestamp() * 1000)
        board = mongo.db.board
        post = {
            "name": name,
            "title": title,
            "contents": contents,
            "pubdate": current_utc_time,
            "writer_id": session.get("id"),
            "view": 0,
        }
        x = board.insert_one(post)
        return redirect(url_for("board.board_view", idx=x.inserted_id))
    else:
        return render_template("/board/write.html", category=category)
@blueprint.route("/edit/<idx>", methods=["GET", "POST"])
def board_edit(idx):
    """GET: render the edit form.  POST: apply the edit.

    Only the original writer (session id match) may edit.
    NOTE(review): unlike write/view, this route has no @login_required;
    confirm whether that is intended.
    """
    if request.method == "GET":
        board = mongo.db.board
        data = board.find_one({"_id": ObjectId(idx)})
        if data is None:
            flash("해당 게시물이 존재하지 않습니다.")
            return redirect(url_for("board.board_list"))
        else:
            if session.get("id") == data.get("writer_id"):
                return render_template("/board/edit.html", data=data, category=category)
            else:
                flash("글 수정 권한이 없습니다.")
                return redirect(url_for("board.board_list"))
    else:
        title = request.form.get("title")
        contents = request.form.get("contents")
        board = mongo.db.board
        # NOTE(review): no None check here -- a bad idx would raise
        # AttributeError on data.get below; confirm and guard like the GET path.
        data = board.find_one({"_id": ObjectId(idx)})
        if session.get("id") == data.get("writer_id"):
            board.update_one({"_id": ObjectId(idx)}, {
                "$set": {
                    "title": title,
                    "contents": contents,
                }
            })
            flash("수정되었습니다.")
            return redirect(url_for("board.board_view", idx=idx))
        else:
            flash("글 수정 권한이 없습니다.")
            return redirect(url_for("board.board_list"))
@blueprint.route("/delete/<idx>")
def board_delete(idx):
    """Delete a post when the current session user is its writer."""
    board = mongo.db.board
    data = board.find_one({"_id": ObjectId(idx)})
    # Guard against a missing document: find_one returns None for an unknown
    # idx, and the original crashed with AttributeError on None.get(...).
    if data is not None and data.get("writer_id") == session.get("id"):
        board.delete_one({"_id": ObjectId(idx)})
        flash("삭제되었습니다.")
    else:
        flash("삭제 권한이 없습니다.")
    return redirect(url_for("board.board_list"))
| [
"s2018w37@e-mirim.hs.kr"
] | s2018w37@e-mirim.hs.kr |
40b2cbe54c638b602a87ec294c62d7fd598a0607 | 4b27a7e99c55a343cb845d085dd88aa7e77a8079 | /iter.py | 817bc91b4ad139ed8a8b3c340c75968d37d55a26 | [] | no_license | damodardikonda/Python-Pandas- | a1f0395a9514dbb639116d35ae465b7135d92c2c | de95146cbb01047d87a5bb297d94c21181dbd629 | refs/heads/master | 2022-09-19T01:25:31.985004 | 2020-06-05T07:35:44 | 2020-06-05T07:35:44 | 269,561,445 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 640 | py | import numpy as np
import pandas as pd
df=pd.DataFrame(np.random.rand(4,3),columns=['c1','c2','c3'])
print(df)
for k,v in df.iteritems():
print(k,v)
print("\n\nthrough row \n\n")
for row,row_index in df.iterrows():
print(row,row_index)
print("\n\n giving tuple asa value. first it print an index\n\n")
for r in df.itertuples():
print(r)
#Note − Do not try to modify any object while iterating. Iterating is meant for reading and the
# iterator returns a copy of the original object (a view), thus the changes will not reflect on the original object.
for i in df.iterrows();
i['a']=30
print(i)#it wont changes
| [
"damodar2dikonda@gmail.com"
] | damodar2dikonda@gmail.com |
d18ab02ef3bfd99d5e082a7ae112e606c37c79e5 | c14b274e98beeea6ad9f49b56dbc658e9083e160 | /Instanssi/ext_programme/migrations/0003_auto__add_field_programmeevent_event_type.py | 933e006bed9e56f87ab96c3d8cf25b9a7338ec97 | [
"MIT"
] | permissive | Yaamboo/Instanssi.org | 3096e59aa3c328dd52e4a5b8c29cdf8e5baddec0 | 17a09c3013ea83f46bd66dd412cfe5bb3a606710 | refs/heads/master | 2021-01-24T21:58:02.707521 | 2014-02-12T22:10:02 | 2014-02-12T22:10:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,017 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'ProgrammeEvent.event_type'
db.add_column('ext_programme_programmeevent', 'event_type',
self.gf('django.db.models.fields.IntegerField')(default=0),
keep_default=False)
def backwards(self, orm):
# Deleting field 'ProgrammeEvent.event_type'
db.delete_column('ext_programme_programmeevent', 'event_type')
models = {
'ext_programme.programmeevent': {
'Meta': {'object_name': 'ProgrammeEvent'},
'description': ('django.db.models.fields.TextField', [], {}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['kompomaatti.Event']"}),
'event_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'facebook_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'github_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'home_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'icon_original': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'linkedin_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'presenters': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'presenters_titles': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'twitter_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'wiki_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'kompomaatti.event': {
'Meta': {'object_name': 'Event'},
'archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'date': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mainurl': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'})
}
}
complete_apps = ['ext_programme'] | [
"katajakasa@gmail.com"
] | katajakasa@gmail.com |
84a090daec0107588cb4ff337ef930806f29e773 | 5ae15acd125798f3746c092d15dab5a9456d0121 | /backend/home/migrations/0002_load_initial_data.py | b745cb17280fa4e392e7f36d0c5f4cb05d00b5a9 | [] | no_license | crowdbotics-apps/kirpi-19355 | 8c5be04d51dccf08c44101e35561ad7f78207407 | 52aaca086597f9f9a98bc5af889d6f7addf0ac10 | refs/heads/master | 2022-11-25T06:54:58.142128 | 2020-08-04T03:43:38 | 2020-08-04T03:43:38 | 284,869,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,274 | py | from django.db import migrations
def create_customtext(apps, schema_editor):
    # Seed the CustomText row with the app's title.
    CustomText = apps.get_model("home", "CustomText")
    customtext_title = "kirpi"
    CustomText.objects.create(title=customtext_title)


def create_homepage(apps, schema_editor):
    # Seed the HomePage body with the default Crowdbotics landing markup.
    HomePage = apps.get_model("home", "HomePage")
    homepage_body = """
    <h1 class="display-4 text-center">kirpi</h1>
    <p class="lead">
    This is the sample application created and deployed from the Crowdbotics app.
    You can view list of packages selected for this application below.
    </p>"""
    HomePage.objects.create(body=homepage_body)


def create_site(apps, schema_editor):
    # Point the default django.contrib.sites record (pk=1) at the deployed domain.
    Site = apps.get_model("sites", "Site")
    custom_domain = "kirpi-19355.botics.co"
    site_params = {
        "name": "kirpi",
    }
    if custom_domain:
        site_params["domain"] = custom_domain
    Site.objects.update_or_create(defaults=site_params, id=1)


class Migration(migrations.Migration):
    # Data migration: runs the seeding functions above after the initial schema.

    dependencies = [
        ("home", "0001_initial"),
        ("sites", "0002_alter_domain_unique"),
    ]

    operations = [
        migrations.RunPython(create_customtext),
        migrations.RunPython(create_homepage),
        migrations.RunPython(create_site),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
ceeb42f92f947c9545d365c69ef98e60cff3c358 | 0c507f09b7328e58b8dc2003a30056699c772d6d | /binary_search/search_insert_position.py | de25f23ea581768d74647a7902df6e71d7dc0281 | [] | no_license | chicocheco/leetcode_exercises | 81c531fa418eaa62097ccda07cf1d21a882fb965 | c97b47907ddf6b5b26b448969f515068648ea9d9 | refs/heads/main | 2023-08-30T07:50:05.498088 | 2021-11-16T16:27:44 | 2021-11-16T16:27:44 | 402,787,468 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 910 | py | """
https://leetcode.com/problems/search-insert-position
Given a sorted array of distinct integers and a target value, return the index if the target is found. If not,
return the index where it would be if it were inserted in order.
You must write an algorithm with O(log n) runtime complexity.
Input: nums = [1,3,5,6], target = 5
Output: 2
"""
def search_insert(nums, target):
    """Return the index of ``target`` in the sorted list ``nums``, or the
    index at which it would have to be inserted to keep ``nums`` sorted.

    Standard binary search: O(log n).
    """
    lo, hi = 0, len(nums) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        value = nums[mid]
        if value == target:
            return mid
        if value < target:
            lo = mid + 1
        else:
            hi = mid - 1
    # Loop finished without a hit: lo is the insertion point.
    return lo


assert search_insert([7, 9, 11, 14], 14) == 3
assert search_insert([7, 9, 11, 14], 15) == 4
assert search_insert([7, 9, 11, 14], 8) == 1
assert search_insert([7, 9, 11, 14], 12) == 3
"stanislav.matas@gmail.com"
] | stanislav.matas@gmail.com |
72db980ed59813e041dd86e6d8a4e7bbe29346aa | 6923f79f1eaaba0ab28b25337ba6cb56be97d32d | /Python_Scripting_for_Computational_Science_Third_Edition/app/wavesim2D/F77/Verify/test1.py | 102913c481edabc9df7232e41e9ae80f82a89026 | [] | no_license | burakbayramli/books | 9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0 | 5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95 | refs/heads/master | 2023-08-17T05:31:08.885134 | 2023-08-14T10:05:37 | 2023-08-14T10:05:37 | 72,460,321 | 223 | 174 | null | 2022-10-24T12:15:06 | 2016-10-31T17:24:00 | Jupyter Notebook | UTF-8 | Python | false | false | 786 | py | #!/usr/bin/env python
# Verification driver: patch the F77 wave simulator sources, rebuild, run a
# short simulation, and (if plotmtv is available) display the dumped frames.
import os, shutil, glob
os.chdir(os.pardir)
shutil.copy('main.f.orig', 'main.f')
shutil.copy('F77WAVE.fcp.orig', 'F77WAVE.fcp')
# edit main.f such that solutions are dumped,
# also use a small grid
os.system("perl -pi.old~ -e 's#^C(\s+)call dump# $1call dump#' main.f")
os.system("perl -pi.old~ -e 's#^[^C]\s+PARAMETER \(n=(\d+)\)# PARAMETER (n=31)#' main.f")
os.system("./make.sh")
os.chdir("Verify")
# Remove stale output frames from a previous run.
tmpfiles = glob.glob("tmp_*.mtv")
for file in tmpfiles: os.remove(file)
# The simulator reads the number of time steps from stdin.
f = open('tmp.input', 'w')
f.write('20\n') # no of time steps
f.close()
os.system("../app < tmp.input")
# show on the screen:
from scitools.misc import findprograms
if findprograms(['plotmtv'], write_message=1):
    os.system("plotmtv -geometry 600x700 -nodate -3d tmp_*.mtv")
| [
"bb@b.om"
] | bb@b.om |
ea125b5976ac3edee7174c1cbb098beeb5a9b5e9 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_212/ch117_2020_03_30_13_52_24_265011.py | dab219b83b876930425f22a7e679427c1de4cd78 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 294 | py | from mat import sen
def snell_descartes (n1, n2, anguloI) :
ânguloR = ( n1/n2 ) * sen(anguloI)
return ânguloR
n1 = input ("qual o meio que o raio provem?")
n2 = input (" qual meio para o qual o raio passsa?")
anguloI = input ("qual o angulo de incidência?")
print (snell_descartes) | [
"you@example.com"
] | you@example.com |
a9c34a38bcbaac8f21ed25259bc049d08cead3f7 | ae83914f309ee203c9743a1c2273539862126e92 | /src/modules/bilstm_crf.py | 82e924e47768fb453d725c3dba95f9d9b1f84159 | [] | no_license | kajyuuen/pytorch-ner | e416af264dd24470b0d7c3c10346a96e241810da | 5b6084b122aa7bfe2f18fe63411535f180e24f8d | refs/heads/master | 2020-08-10T16:52:24.310618 | 2019-10-18T06:57:16 | 2019-10-18T06:57:16 | 214,380,670 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,213 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch_partial_crf import CRF
from pytorch_partial_crf import PartialCRF
from typing import Tuple
from src.common.config import PAD_TAG
from src.common.utils import create_possible_tag_masks
from src.modules.bilstm import BiLSTM
class BiLSTM_CRF(nn.Module):
    """Sequence labeller: a BiLSTM encoder producing per-token emission
    scores, followed by a (partial) CRF inference layer."""

    def __init__(self,
                 num_tags,
                 label_vocab,
                 char_vocab,
                 word_vocab,
                 emb_dict,
                 dropout_rate = 0,
                 batch_first = True,
                 inference_type = "CRF"):
        # NOTE(review): dropout_rate is accepted but never used here.
        super().__init__()
        self.encoder = BiLSTM(num_tags,
                              label_vocab,
                              char_vocab,
                              word_vocab,
                              emb_dict,
                              batch_first = batch_first)
        # "Simple" and "Hard" variants share the plain CRF inferencer.
        if inference_type in ["CRF", "Simple", "Hard"]:
            self.inferencer = CRF(num_tags)
        elif inference_type == "PartialCRF":
            self.inferencer = PartialCRF(num_tags)
        else:
            raise ModuleNotFoundError
        self.num_tags = num_tags

    def forward(self, batch) -> torch.Tensor:
        """Return the CRF loss for a labelled batch."""
        emissions, tags, mask = self._get_variable_for_decode(batch)
        loss = self.inferencer(emissions, tags, mask)
        return loss

    def decode(self, batch) -> Tuple[torch.Tensor, torch.Tensor]:
        """Viterbi-decode the best tag sequence for each sentence."""
        emissions, tags, mask = self._get_variable_for_decode(batch)
        best_tags_list = self.inferencer.viterbi_decode(emissions, mask)
        return best_tags_list

    def restricted_decode(self, base_batch, batch) -> Tuple[torch.Tensor, torch.Tensor]:
        """Viterbi decoding restricted to tags permitted by base_batch's labels."""
        possible_tags = create_possible_tag_masks(self.num_tags, base_batch.label)
        emissions, _, mask = self._get_variable_for_decode(batch)
        best_tags_list = self.inferencer.restricted_viterbi_decode(emissions, possible_tags, mask)
        return best_tags_list

    def _get_variable_for_decode(self, batch) -> torch.Tensor:
        """Encode the batch and derive the padding mask from its labels."""
        emissions = self.encoder(batch)
        tags = batch.label
        mask = tags.clone().byte()
        # Nonzero label ids mark real tokens; zero (padding) stays masked out.
        mask[mask != 0] = 1
        return emissions, tags, mask
| [
"kajyuuen@gmail.com"
] | kajyuuen@gmail.com |
c54a497e07b5067727c7be96464ec42e722a69f7 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/pg_0928+031/sdB_pg_0928+031_coadd.py | be060adebe66994d01bc1fb878a3d294c48c85fb | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 429 | py | from gPhoton.gMap import gMap
def main():
    """Build a 30 s NUV count movie and a coadded count image for sdB PG 0928+031."""
    # Single gMap call; sky position and output paths are hard-coded for this target.
    gMap(band="NUV", skypos=[142.748667,2.842431], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_pg_0928+031/sdB_pg_0928+031_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_pg_0928+031/sdB_pg_0928+031_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
    main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
5601667b2989a7eb779fe5ff718bd7670f4c92cb | 52e8841ac9603e994fc487ecb52f232e55a50e07 | /Bio/NeuralNetwork/Training.py | af8c2f0be8fc4b890d4c2958db495acfaf54b2ad | [] | no_license | rored/RozszerzenieBio.PDB | aff434fddfe57199a7465f79126eba62b1c789ae | 7c9d696faacabff912b1263fe19291d6a198c3c2 | refs/heads/master | 2021-01-21T04:50:37.903227 | 2016-06-23T19:15:42 | 2016-06-23T19:15:42 | 55,064,794 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 3,149 | py | # This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
"""Provide classes for dealing with Training Neural Networks."""
# standard modules
import random
__docformat__ = "restructuredtext en"
class TrainingExample(object):
    """A single training example: an input pattern, its expected outputs,
    and an optional identifying name."""

    def __init__(self, inputs, outputs, name=""):
        # Stored as given; no defensive copies are made.
        self.inputs = inputs
        self.outputs = outputs
        self.name = name
class ExampleManager(object):
    """Manage a grouping of Training Examples.

    This is meant to make it easy to split a bunch of training examples
    into three types of data:

    o Training Data -- used to do the actual training of the network.

    o Validation Data -- used to validate the network while training,
    providing an independent way to evaluate how the network is doing.

    o Testing Data -- used to verify how well a network works; never used
    during training, so it is a completely independent check.
    """

    def __init__(self, training_percent=.4, validation_percent=.4):
        """Initialize the manager.

        Arguments:
        o training_percent - Fraction of examples used for training.
        o validation_percent - Fraction of examples used for validation
        during training.  (The remainder becomes testing data.)

        Raises ValueError when the two fractions exceed 1.0.  (The original
        used ``assert``, which is silently stripped under ``python -O``.)
        """
        if training_percent + validation_percent > 1.0:
            raise ValueError(
                "Training and validation percentages more than 100 percent")

        self.train_examples = []
        self.validation_examples = []
        self.test_examples = []

        self.training_percent = training_percent
        self.validation_percent = validation_percent

    def add_examples(self, training_examples):
        """Randomly assign examples to the train/validation/test partitions.

        Arguments:
        o training_examples - A list of TrainingExamples to manage.
        """
        placement_rand = random.Random()
        # Loop-invariant cumulative threshold, hoisted out of the loop.
        validation_threshold = self.training_percent + self.validation_percent
        # Assign each example randomly, with the configured probabilities.
        for example in training_examples:
            chance_num = placement_rand.random()
            if chance_num <= self.training_percent:
                self.train_examples.append(example)
            elif chance_num <= validation_threshold:
                self.validation_examples.append(example)
            else:
                self.test_examples.append(example)
| [
"Viktoria@MacBook-Pro-Viktoria.local"
] | Viktoria@MacBook-Pro-Viktoria.local |
fe12c229a6c55bc5aa28a38dcbb95155d30832de | 0ca2d3fcd53fb9795c2d8741affe87cccede300c | /scipy_doc/routines.sort.html/numpy.argmax.py | 93c8150171a861d2001b87f58f31e8fd46dc7b9c | [] | no_license | yzozulya/numpy_test_examples | 9bafc5d0711149a3366a0644309be6ff6d480c7a | 4b1e65b160728a4c483d883bd00b72b2f61377b8 | refs/heads/master | 2021-01-01T19:01:50.560912 | 2015-04-08T12:18:46 | 2015-04-08T12:18:46 | 33,604,541 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | import numpy as np
a = np.arange(6).reshape(2, 3)
a
np.argmax(a)
np.argmax(a, axis=0)
np.argmax(a, axis=1)
b = np.arange(6)
b[1] = 5
b
np.argmax(b) # Only the first occurrence is returned.
| [
"yulia.zozulya@jetbrains.com"
] | yulia.zozulya@jetbrains.com |
3033ca338ddf9127998248fb43affafd10c89356 | 55afd3bbe5187dba96be169a7c068c7cf7543447 | /article17/speciessummary/attrs_conclusion/td_range_conclusion.py | 5198c6beeb71524b0e782223750eee3efb0afa07 | [] | no_license | eaudeweb/art17-2006 | 6d9413439e10f4db0b72fc49c80b7c50ee1ef59e | 4bc61cd2972f94769dae97b95ccb55f2a0952cf1 | refs/heads/master | 2016-09-05T13:33:19.280952 | 2014-01-30T09:54:27 | 2014-01-30T09:54:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 912 | py | # Script (Python)
# /article17/speciessummary/attrs_conclusion/td_range_conclusion
# params: 'assesment_speciesname, region, record, conclusions'
## Script (Python) "td_range_conclusion"
##bind container=container
##bind context=context
##bind namespace=
##bind script=script
##bind subpath=traverse_subpath
##parameters=assesment_speciesname, region, record, conclusions
##title=
##
# Zope Script body: build the table-cell dict for a species' range conclusion.
output = context.background_colour(record['conclusion_range'], 'center', conclusions)
title = output.get('title', '')
method = record['method_range'] or ''
# Look up the numeric percentage backing this conclusion, if recorded.
cursor = context.sql_methods.get_range_conclusion_value(assesment_speciesname=assesment_speciesname, region=region, assessment_method=method)
if len(cursor):
    concl_value = cursor[0]['percentage_range_surface_area']
    if concl_value:
        # Append the value to the tooltip title, e.g. "Favourable: 12.3".
        title = "%s: %s" % (title, concl_value)
output.update({
    'content': method,
    'title': title,
})
return output
| [
"alex@grep.ro"
] | alex@grep.ro |
2da48ad1d33abb08e7a09bd8a674dd46969a63a8 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/9b44109ba3091e6538dedfac2987cf31793e892d-<configure_cpu_and_memory>-bug.py | fddbaac7d9ab62f50ffbbd644493ecf7bba42ed2 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,690 | py | def configure_cpu_and_memory(self, vm_obj, vm_creation=False):
    # Apply the CPU / memory settings from self.params['hardware'] to
    # self.configspec, comparing each value against the live vm_obj
    # (None when creating) and flagging self.change_detected when they differ.
    if ('hardware' in self.params):
        if ('num_cpus' in self.params['hardware']):
            try:
                num_cpus = int(self.params['hardware']['num_cpus'])
            except ValueError as e:
                self.module.fail_json(msg='hardware.num_cpus attribute should be an integer value.')
            # CPU topology: total vCPUs must divide evenly into sockets.
            if ('num_cpu_cores_per_socket' in self.params['hardware']):
                try:
                    num_cpu_cores_per_socket = int(self.params['hardware']['num_cpu_cores_per_socket'])
                except ValueError as e:
                    self.module.fail_json(msg='hardware.num_cpu_cores_per_socket attribute should be an integer value.')
                if ((num_cpus % num_cpu_cores_per_socket) != 0):
                    self.module.fail_json(msg='hardware.num_cpus attribute should be a multiple of hardware.num_cpu_cores_per_socket')
                self.configspec.numCoresPerSocket = num_cpu_cores_per_socket
                if ((vm_obj is None) or (self.configspec.numCoresPerSocket != vm_obj.config.hardware.numCoresPerSocket)):
                    self.change_detected = True
            self.configspec.numCPUs = num_cpus
            if ((vm_obj is None) or (self.configspec.numCPUs != vm_obj.config.hardware.numCPU)):
                self.change_detected = True
        # num_cpus is mandatory when creating a VM from scratch (no template).
        elif (vm_creation and (not self.params['template'])):
            self.module.fail_json(msg='hardware.num_cpus attribute is mandatory for VM creation')
        if ('memory_mb' in self.params['hardware']):
            try:
                self.configspec.memoryMB = int(self.params['hardware']['memory_mb'])
            except ValueError:
                self.module.fail_json(msg='Failed to parse hardware.memory_mb value. Please refer the documentation and provide correct value.')
            if ((vm_obj is None) or (self.configspec.memoryMB != vm_obj.config.hardware.memoryMB)):
                self.change_detected = True
        elif (vm_creation and (not self.params['template'])):
            self.module.fail_json(msg='hardware.memory_mb attribute is mandatory for VM creation')
        # Hot-add / hot-remove toggles (optional booleans).
        if ('hotadd_memory' in self.params['hardware']):
            self.configspec.memoryHotAddEnabled = bool(self.params['hardware']['hotadd_memory'])
            if ((vm_obj is None) or (self.configspec.memoryHotAddEnabled != vm_obj.config.memoryHotAddEnabled)):
                self.change_detected = True
        if ('hotadd_cpu' in self.params['hardware']):
            self.configspec.cpuHotAddEnabled = bool(self.params['hardware']['hotadd_cpu'])
            if ((vm_obj is None) or (self.configspec.cpuHotAddEnabled != vm_obj.config.cpuHotAddEnabled)):
                self.change_detected = True
        if ('hotremove_cpu' in self.params['hardware']):
            self.configspec.cpuHotRemoveEnabled = bool(self.params['hardware']['hotremove_cpu'])
            if ((vm_obj is None) or (self.configspec.cpuHotRemoveEnabled != vm_obj.config.cpuHotRemoveEnabled)):
                self.change_detected = True
        # Memory reservation in MB, carried in a vim.ResourceAllocationInfo.
        if ('memory_reservation' in self.params['hardware']):
            memory_reservation_mb = 0
            try:
                memory_reservation_mb = int(self.params['hardware']['memory_reservation'])
            except ValueError as e:
                self.module.fail_json(msg=('Failed to set memory_reservation value.Valid value for memory_reservation value in MB (integer): %s' % e))
            mem_alloc = vim.ResourceAllocationInfo()
            mem_alloc.reservation = memory_reservation_mb
            self.configspec.memoryAllocation = mem_alloc
            if ((vm_obj is None) or (self.configspec.memoryAllocation.reservation != vm_obj.config.memoryAllocation.reservation)):
                self.change_detected = True
        if ('memory_reservation_lock' in self.params['hardware']):
            self.configspec.memoryReservationLockedToMax = bool(self.params['hardware']['memory_reservation_lock'])
            if ((vm_obj is None) or (self.configspec.memoryReservationLockedToMax != vm_obj.config.memoryReservationLockedToMax)):
                self.change_detected = True
        # Boot firmware must be one of 'bios' / 'efi' (case-insensitive input).
        if ('boot_firmware' in self.params['hardware']):
            boot_firmware = self.params['hardware']['boot_firmware'].lower()
            if (boot_firmware not in ('bios', 'efi')):
                self.module.fail_json(msg=("hardware.boot_firmware value is invalid [%s]. Need one of ['bios', 'efi']." % boot_firmware))
            self.configspec.firmware = boot_firmware
            if ((vm_obj is None) or (self.configspec.firmware != vm_obj.config.firmware)):
                self.change_detected = True
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
e906b7e87133a2381c44cadedd1eb9b84a7f7ba0 | 34851e4fedf2fea3aa5b87a923951ee8887344e9 | /public_markets/campbxusd.py | 5325df38835caeb491fda2567a16a2633a0ae639 | [] | no_license | kafitz/btc-arbitrage | cfb8a32bdea0312bb3a5a9bd70a36ebdf7f6d011 | 52d66bd47c2bccb9a75c06dda0ee4db9a7436ebb | refs/heads/master | 2021-01-23T18:11:11.211742 | 2013-03-25T18:16:13 | 2013-03-25T18:16:13 | 8,922,078 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,431 | py | import urllib2
import json
import logging
from market import Market
class CampBXUSD(Market):
    '''Updates CampBX depth tables (USD market).'''

    def __init__(self):
        super(CampBXUSD, self).__init__("USD")
        self.update_rate = 25  # seconds between depth refreshes
        # Placeholder order book until the first successful fetch.
        self.depth = {'asks': [{'price': 0, 'amount': 0}], 'bids': [{'price': 0, 'amount': 0}]}
        # {withdraw: amount bitcoins charged as network fee, exchange_rate: % for currency exchange}
        self.fees = {'withdraw': 0, 'exchange_rate': 0.0055}

    def update_depth(self):
        '''Fetch the CampBX order book and store it on self.depth.

        On any fetch/parse failure the previous depth is kept and an
        error is logged instead of raising.
        '''
        try:
            res = urllib2.urlopen('http://campbx.com/api/xdepth.php')
            jsonstr = res.read()
            data = json.loads(jsonstr)
            self.depth = self.format_depth(data)
        # Narrowed from a bare ``except:`` so KeyboardInterrupt and
        # SystemExit still propagate instead of being swallowed.
        except Exception:
            logging.error("%s - depth data fetch error." % (self.name,))

    def sort_and_format(self, l, reverse=False):
        '''Sort [price, amount] pairs by price and return them as dicts.'''
        l.sort(key=lambda x: float(x[0]), reverse=reverse)
        ret = []
        for i in l:
            ret.append({'price': float(i[0]), 'amount': float(i[1])})
        return ret

    def format_depth(self, data):
        '''Convert the raw API payload into sorted ask/bid tables.'''
        bids = self.sort_and_format(data["Bids"], True)
        asks = self.sort_and_format(data["Asks"], False)
        return {'asks': asks, 'bids': bids}
# Manual smoke test: build the market object and dump its depth table.
# NOTE: Python 2 print statement (consistent with the urllib2 import).
if __name__ == "__main__":
    market = CampBXUSD()
    print market.get_depth()

"kafitz22@gmail.com"
] | kafitz22@gmail.com |
9e553106f69822bb6c99bf3d3f16c2118c568b90 | 9cc51b53bc3cac814843758fb98d212cd9656a0b | /model/product_base_info.py | 40efd57256ca03c7d4d1aabcf8292fbcf495cfef | [
"Apache-2.0"
] | permissive | juxiangwu/PSS | ead5317e54fcc3ad12e6df10b9956e276cfa8a6d | 9108ca6b669f0bec9647d015d14b421dacc02645 | refs/heads/master | 2021-05-10T15:53:01.649567 | 2018-02-17T14:35:37 | 2018-02-17T14:35:37 | 118,563,046 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,037 | py | # -*- coding:utf-8 -*-
# 商品基本信息表
from config.appconfig import db
import datetime
class ProductBaseInfo(db.Model):
    """Product master-data model mapped to table ``t_product_base_info``.

    One row per product per shop: identifying codes (code / barcode /
    pinyin code), category, unit of sale, three price levels (purchase,
    retail, wholesale), supplier, an enabled flag and two timestamps.
    """
    __tablename__ = "t_product_base_info"
    id = db.Column('id',db.Integer,primary_key = True)
    shopId = db.Column('shop_id',db.Integer)
    name = db.Column('name',db.String(128))
    code = db.Column('code',db.String(128))
    barcode = db.Column('barcode',db.String(128))
    pinyinCode = db.Column('pinyin_code',db.String(128))
    categoryName = db.Column('category_name',db.String(128))
    categoryId = db.Column('category_id',db.Integer)
    unitName = db.Column('unit_name',db.String(128))
    # NOTE(review): "puchase" looks like a typo for "purchase"; kept as-is
    # because the DB column name and existing callers depend on it.
    puchasePrice = db.Column('puchase_price',db.Float)
    retailPrice = db.Column('sell_price',db.Float)
    wholesalePrice = db.Column('wholesale_price',db.Float)
    supplierName = db.Column('supplier_name',db.String(128))
    supplierId = db.Column('supplier_id',db.Integer)
    isEnabled = db.Column('is_enabled',db.Boolean)
    createDateTime = db.Column('create_datetime',db.DateTime)
    modifyDateTime = db.Column('modify_datetime',db.DateTime)
    def __init__(self,shopId,name,code,barcode,pinyinCode,categoryId,
                 categoryName,unitName,puchasePrice,retailPrice,
                 wholesalePrice,supplierName,supplierId,createDateTime,
                 modifyDateTime,isEnabled=True):
        """Populate every column explicitly; ``isEnabled`` defaults to True."""
        self.shopId = shopId
        self.name = name
        self.code = code
        self.barcode = barcode
        self.pinyinCode = pinyinCode
        self.categoryId = categoryId
        self.categoryName = categoryName
        self.unitName = unitName
        self.puchasePrice = puchasePrice
        self.wholesalePrice = wholesalePrice
        self.retailPrice = retailPrice
        self.supplierId = supplierId
        self.supplierName = supplierName
        self.createDateTime = createDateTime
        self.modifyDateTime = modifyDateTime
        self.isEnabled = isEnabled
    def to_json(self):
        """Serialize to a plain dict (datetimes as '%Y-%m-%d %H:%M:%S').

        Note the key spellings: "purchasePrice" maps the ``puchasePrice``
        attribute and "isEnable" maps ``isEnabled`` — existing API clients
        rely on these exact keys.
        """
        return {
            "id":self.id,
            "shopId":self.shopId,
            "name":self.name,
            "code":self.code,
            "barcode":self.barcode,
            "pinyinCode":self.pinyinCode,
            "categoryName":self.categoryName,
            "categoryId":self.categoryId,
            "unitName":self.unitName,
            "purchasePrice":self.puchasePrice,
            "retailPrice":self.retailPrice,
            "wholesalePrice":self.wholesalePrice,
            "supplierName":self.supplierName,
            "supplierId":self.supplierId,
            "isEnable":self.isEnabled,
            "createDateTime":self.createDateTime.strftime("%Y-%m-%d %H:%M:%S"),
            "modifyDateTime":self.modifyDateTime.strftime("%Y-%m-%d %H:%M:%S")
        }
    def __repr__(self):
        # Include the primary key only once the row has been persisted.
        if self.id:
            return '<ProductBaseInfo@id=%d,name=%s,shopId=%d,categoryId=%d>' %(self.id,self.name,self.shopId,self.categoryId)
        else:
            return '<ProductBaseInfo@name=%s,shopId=%d,categoryId=%d>' %(self.name,self.shopId,self.categoryId)
"kkoolerter@gmail.com"
] | kkoolerter@gmail.com |
d4450b0c6d0c8606e68e61901bc5d21cb770cc72 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /090_logging/examples/Python Logging – Simplest Guide with Full Code and Examples/008_9. How to include traceback information in logged messages.py | 50bc97aa806ada705b4a3271a710053b45bd153f | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 974 | py | # Besides debug, info, warning, error, and critical messages, you can log exceptions that will include any
# associated traceback information.
# With logger.exception, you can log traceback information should the code encounter any error. logger.exception will log
# the message provided in its arguments as well as the error message traceback info.
#
# Below is a nice example.
import logging
# Create or get the logger
logger = logging.getLogger(__name__)
# set log level
logger.setLevel(logging.INFO)
def divide(x, y):
    """Return x / y; on division by zero, log the traceback and return None."""
    try:
        quotient = x / y
    except ZeroDivisionError:
        # logger.exception records the message plus the active traceback.
        logger.exception("Division by zero problem")
        return None
    return quotient
# Logs
logger.error("Divide {x} / {y} = {c}".format(x=10, y=0, c=divide(10,0)))
#> ERROR:__main__:Division by zero problem
#> Traceback (most recent call last):
#> File "<ipython-input-16-a010a44fdc0a>", line 12, in divide
#> out = x / y
#> ZeroDivisionError: division by zero
#> ERROR:__main__:None | [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
7d2228d08984e1abc9de2e0912b38ac6830e3e21 | f3b233e5053e28fa95c549017bd75a30456eb50c | /bace_input/L4M/4M-3J_MD_NVT_rerun/set.py | 15a567d11dc43cc9d57465a49bebaf753252a7d4 | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,082 | py | import os
# Set up and submit one TI (thermodynamic-integration) window per lambda
# value: for each lambda, rebuild a working directory from the templates,
# substitute the lambda into the input/PBS files, copy the topology and
# restart files, and submit the job with qsub.
# NOTE(review): ``dir`` shadows the builtin of the same name; harmless in
# this standalone script but worth renaming if the code grows.
dir = '/mnt/scratch/songlin3/run/bace/L4M/MD/ti_one-step/4M_3J/'
filesdir = dir + 'files/'
temp_equiin = filesdir + 'temp_equi.in'
temp_prodin = filesdir + 'temp_prod.in'
temp_pbs = filesdir + 'temp.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
    # Recreate a clean per-lambda directory named after the lambda value.
    os.system("rm -r %6.5f" %(j))
    os.system("mkdir %6.5f" %(j))
    os.chdir("%6.5f" %(j))
    os.system("rm *")
    workdir = dir + "%6.5f" %(j) + '/'
    # Equilibration input: copy the template and substitute lambda for XXX.
    eqin = workdir + "%6.5f_equi.in" %(j)
    os.system("cp %s %s" %(temp_equiin, eqin))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, eqin))
    # Production input: same template-and-substitute treatment.
    prodin = workdir + "%6.5f_prod.in" %(j)
    os.system("cp %s %s" %(temp_prodin, prodin))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
    # PBS job script for this window.
    pbs = workdir + "%6.5f.pbs" %(j)
    os.system("cp %s %s" %(temp_pbs, pbs))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
    # Topology and restart coordinates shared by every window.
    os.system("cp ../4M-3J_merged.prmtop .")
    os.system("cp ../0.5_equi_0.rst .")
    # Submit the job, then return to the root directory for the next lambda.
    os.system("qsub %s" %(pbs))
    os.chdir(dir)
| [
"songlin3@msu.edu"
] | songlin3@msu.edu |
07e54c5c203580c9ff29ebbc1c41db6d46819a28 | a3cc7286d4a319cb76f3a44a593c4a18e5ddc104 | /lib/surface/datastore/export.py | 4794c8107aa6b3cc5df32a1d806019761b0348b6 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | jordanistan/Google-Cloud-SDK | f2c6bb7abc2f33b9dfaec5de792aa1be91154099 | 42b9d7914c36a30d1e4b84ae2925df7edeca9962 | refs/heads/master | 2023-09-01T01:24:53.495537 | 2023-08-22T01:12:23 | 2023-08-22T01:12:23 | 127,072,491 | 0 | 1 | NOASSERTION | 2023-08-22T01:12:24 | 2018-03-28T02:31:19 | Python | UTF-8 | Python | false | false | 3,599 | py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The gcloud datastore export command."""
from googlecloudsdk.api_lib.datastore import admin_api
from googlecloudsdk.api_lib.datastore import operations
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.datastore import flags
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class Export(base.Command):
  """Export Cloud Datastore entities to Google Cloud Storage.
  Export a copy of all or a subset of entities from Google Cloud Datastore
  to another storage system, such as Google Cloud Storage. Recent
  updates to entities may not be reflected in the export. The export occurs in
  the background and its progress can be monitored and managed via the operation
  commands. The output of an export may only be used once the operation has
  completed. If an export operation is cancelled before completion then it may
  leave partial data behind in Google Cloud Storage.
  """

  @staticmethod
  def Args(parser):
    """Register flags for this command."""
    flags.AddEntityFilterFlags(parser)
    flags.AddLabelsFlag(parser)
    parser.add_argument(
        'output_url_prefix',
        help="""
        Location for the export metadata and data files. Must be a valid
        Google Cloud Storage bucket with an optional path prefix. For example:

          $ {command} gs://mybucket/my/path

        Will place the export in the `mybucket` bucket in objects prefixed with
        `my/path`.
        """)
    base.ASYNC_FLAG.AddToParser(parser)

  def Run(self, args):
    """Start the export; unless --async was given, wait for completion."""
    project = properties.VALUES.core.project.Get(required=True)
    destination = self._ParseGCSObjectPrefix(args.output_url_prefix)

    response = admin_api.Export(
        project,
        # use join and filter to avoid trailing '/'.
        'gs://{}'
        .format('/'.join([part for part in destination if part is not None])),
        kinds=args.kinds,
        namespaces=args.namespaces,
        labels=args.operation_labels)

    # NOTE(review): ``async`` became a reserved keyword in Python 3.7, so
    # ``args.async`` only parses on older interpreters; confirm the target
    # runtime before touching this line.
    if not args.async:
      operations.WaitForOperation(response)

    return response

  def _ParseGCSObjectPrefix(self, resource):
    """Parses a GCS bucket with an optional object prefix.

    Args:
      resource: the user input resource string.
    Returns:
      a tuple of strings containing the GCS bucket and GCS object. The GCS
      object may be None.
    """
    try:
      # Try as bucket first so that a single id is interpretted as a bucket
      # instead of an object with a missing bucket.
      bucket_ref = resources.REGISTRY.Parse(
          resource, collection='storage.buckets')
      # Call Parse rather than Create to set validate to False, allowing the
      # empty object.
      return (bucket_ref.bucket, None)
    except resources.UserError:
      # Ignored, we'll try parsing again as an object.
      pass
    object_ref = resources.REGISTRY.Parse(
        resource, collection='storage.objects')
    return (object_ref.bucket, object_ref.object)
| [
"jordan.robison@gmail.com"
] | jordan.robison@gmail.com |
a0aed7ae9aa7d5db56497466a5298e8c84067b43 | 9da09ad3aba9501d856f343bbc6d55bdcff1a346 | /apiv1/views.py | 68b9a851a6bf654e2fdb7270c5f321e4be0f2171 | [] | no_license | akiyoko/drf-vue-sample.vue.config.js | 696220a3bf7b590090d8ebe3658bf2f8c88c94b5 | c988326e43a89247e7620b115d37bb81060c5532 | refs/heads/master | 2020-05-01T19:41:24.296791 | 2019-03-25T19:54:53 | 2019-03-25T19:54:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | py | from rest_framework import viewsets
from rest_framework.permissions import IsAuthenticated
from shop.models import Book
from .serializers import BookSerializer
class BookViewSet(viewsets.ModelViewSet):
    """Full CRUD REST endpoints for Book; requires an authenticated user."""
    queryset = Book.objects.all()
    serializer_class = BookSerializer
    permission_classes = (IsAuthenticated,)
| [
"akiyoko@users.noreply.github.com"
] | akiyoko@users.noreply.github.com |
2dd2beb5a598c05ee0915af77e5ed8c43a7e983b | 677ee80f61be1faa4397c747e5c8c21e1e8fab17 | /test_case/case49.py | 7a03366997b09cb24d31db7aec56a25c97cbf11f | [] | no_license | YGragon/PythonSelfStudy | 9f08d1d295f075e996dd493c68c99be94176f3d5 | ffbf0a7a1f9bfb053eb878fac5467563d8e3fb92 | refs/heads/master | 2021-09-10T14:08:02.511883 | 2018-03-27T14:31:10 | 2018-03-27T14:31:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | # 题目:使用lambda来创建匿名函数
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# Named comparison functions built with lambda.
# BUG FIX: the original arithmetic form ((x > y) * x + (x < y) * y)
# returned 0 whenever x == y; a conditional expression handles ties.
MAXIMUM = lambda x, y: x if x >= y else y
MINIMUM = lambda x, y: x if x <= y else y
a = 10
b = 20
print('The largar one is %d' % MAXIMUM(a, b))
print('The lower one is %d' % MINIMUM(a, b))
"1105894953@qq.com"
] | 1105894953@qq.com |
ef6cf78a22ffb85b42ae2798abe283fe30f6ff82 | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/arc078/A/3912467.py | 658f7cff8c6dea4de0cd4537b922000c3104b973 | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 228 | py | n = int(input())
# Split the cards at every prefix boundary and track the smallest
# absolute difference between the two halves' sums.
# (``n`` is read from stdin on the preceding line.)
cards = list(map(int, input().split()))
left_sum = 0
right_sum = sum(cards)
best = float('inf')
for idx in range(n - 1):
    left_sum += cards[idx]
    right_sum -= cards[idx]
    best = min(best, abs(left_sum - right_sum))
print(best)
"kwnafi@yahoo.com"
] | kwnafi@yahoo.com |
3a183e26c8edd2de4a97a8f172af772f6a7dd122 | 148072ce210ca4754ea4a37d83057e2cf2fdc5a1 | /src/core/w3af/w3af/plugins/tests/infrastructure/test_dot_net_errors.py | ff918717ca0724de044b8a05f9c1bfaf423b4898 | [] | no_license | ycc1746582381/webfuzzer | 8d42fceb55c8682d6c18416b8e7b23f5e430c45f | 0d9aa35c3218dc58f81c429cae0196e4c8b7d51b | refs/heads/master | 2021-06-14T18:46:59.470232 | 2017-03-14T08:49:27 | 2017-03-14T08:49:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,626 | py | """
test_dot_net_errors.py
Copyright 2012 Andres Riancho
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from nose.plugins.attrib import attr
from w3af.plugins.tests.helper import PluginTest, PluginConfig
class TestDotNetErrors(PluginTest):
    """Integration test for the ``dot_net_errors`` infrastructure plugin.

    Crawls the local 'moth' test target with web_spider (forward-only)
    and expects exactly one knowledge-base info named
    'Information disclosure via .NET errors'.
    """

    moth_url = 'http://moth/w3af/infrastructure/dot_net_errors/'

    _run_configs = {
        'cfg': {
            'target': moth_url,
            'plugins': {'infrastructure': (PluginConfig('dot_net_errors'),),
                        'crawl': (PluginConfig('web_spider',
                                               ('only_forward', True, PluginConfig.BOOL), ),)}
        }
    }

    # Marked 'ci_fails': flagged as failing/excluded in CI runs.
    @attr('ci_fails')
    def test_dot_net_errors(self):
        cfg = self._run_configs['cfg']
        self._scan(cfg['target'], cfg['plugins'])

        infos = self.kb.get('dot_net_errors', 'dot_net_errors')

        self.assertEqual(len(infos), 1, infos)

        info = infos[0]
        self.assertEqual(
            info.get_name(), 'Information disclosure via .NET errors')
| [
"everping@outlook.com"
] | everping@outlook.com |
601167fca8573e9ec1732967599f8addb1c342fc | 233f97c6f360d478bf975016dd9e9c2be4a64adb | /Circle and Lattice Points.py | 3591c2952ec5ff8683a94b818e74834b0ad5c205 | [] | no_license | unknownboyy/GUVI | 3dbd1bb2bc6b3db52f5f79491accd6c56a2dec45 | d757dd473c4f5eef526a516cf64a1757eb235869 | refs/heads/master | 2020-03-27T00:07:12.449280 | 2019-03-19T12:57:03 | 2019-03-19T12:57:03 | 145,595,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 195 | py | for _ in range(int(input())) :
n=int(input())
count=0
for i in range(-n,n+1):
for j in range(-n,n+1):
if i**2+j**2==n**2:
count+=1
print(count) | [
"ankitagrawal11b@gmail.com"
] | ankitagrawal11b@gmail.com |
8a44e891c7a3a9bce2c86d9cd8efebbbcfe03c93 | 42de984305948658f7487a19f0019034d53781e3 | /Config/AutoStkConfig.py | a9d46ed52c7976044f6e601356f39da0cca81080 | [] | no_license | lzwhw2000/MoDeng | a5037d3298f0285d9aca6af831084dbc60738bba | a521f23214a30ff0497e0ad5797e2190be057848 | refs/heads/master | 2020-08-10T17:16:33.771057 | 2019-10-10T10:01:39 | 2019-10-10T10:01:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,827 | py | # encoding=utf-8
"""
这个脚本是用来存储 stk自动检测 程序的配置信息
"""
import os
cubic_test_last_step = 7 # 在曲线拟合时,取最后的几个值进行二次拟合
# 图片存在的路径,如果没有自动创建
if os.path.exists('C:/Users/paul/Desktop/软件代码/MoDeng/TempPicStore/'):
pic_save_dir_root = 'C:/Users/paul/Desktop/软件代码/MoDeng/TempPicStore/'
elif os.path.exists('F:/软件代码/MoDeng/TempPicStore/'):
pic_save_dir_root = 'F:/软件代码/MoDeng/TempPicStore/'
else:
os.makedirs('C:/Users/paul/Desktop/软件代码/MoDeng/TempPicStore/')
pic_save_dir_root = 'C:/Users/paul/Desktop/软件代码/MoDeng/TempPicStore/'
plot_current_days_amount = 40 # 画出近期的stk走势情况,该参数指示最近取的天数
tailLengthForMACD = 150 # 在计算MACD时,因为之用最近的几个数,所以不需要往前延伸太多,以节省计算量
# 关心的stk
stk_list = [
'cyb',
'sh',
'sz',
'300508',
'000625',
'000725',
'000001',
'000333',
'300508',
'002456',
'603421',
'300059',
'600487',
'600036'
]
step_corner_detect = 6 # 在判断拐点的时候,取最近的数据的个数
corner_Pot_Retrospective_Half = 6 # 进行后验检测拐点时,时间窗的一半
curPath = os.path.abspath(os.path.dirname(__file__))
# rootPath = curPath[:curPath.find("MoDeng\\")+len("MoDeng\\")] # 获取myProject,也就是项目的根路径
rootPath = curPath[:curPath.find("MoDeng\\")+len("MoDeng\\")] # 获取myProject,也就是项目的根路径
MDataPWD = os.path.abspath(rootPath + '/RelativeRank/')
SeaSelectDataPWD = os.path.abspath(rootPath+'/AutoDailyOpt/SeaSelect/')
LastScale = os.path.abspath(rootPath+'/AutoDailyOpt/') + '/LastScale/'
| [
"1210055099@qq.com"
] | 1210055099@qq.com |
508e5a4048912eff6337b872b13c26966803af58 | 372edad1cd6399cadba82818e9fb9682c3bac1b4 | /packages/python/plotly/plotly/validators/layout/grid/_xaxes.py | 0d74e6e6750d28b16d373255bc04c0c95bdc1dd2 | [
"MIT"
] | permissive | OGVGdev/plotly.py | 78bfa9e25e92c367f0da30af7885cdd163ba612b | 96a9101c79aa588023f56153bf274d0d570ffcf6 | refs/heads/master | 2022-11-10T16:44:06.732450 | 2020-06-26T13:07:06 | 2020-06-26T13:07:06 | 275,173,321 | 1 | 0 | MIT | 2020-06-26T14:19:41 | 2020-06-26T14:19:40 | null | UTF-8 | Python | false | false | 756 | py | import _plotly_utils.basevalidators
class XaxesValidator(_plotly_utils.basevalidators.InfoArrayValidator):
    """Validator for the ``layout.grid.xaxes`` property.

    Each array item must be '' or an x-axis id matching
    /^x([2-9]|[1-9][0-9]+)?$/; defaults below can be overridden via kwargs.
    """
    def __init__(self, plotly_name="xaxes", parent_name="layout.grid", **kwargs):
        super(XaxesValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "plot"),
            free_length=kwargs.pop("free_length", True),
            items=kwargs.pop(
                "items",
                {
                    "valType": "enumerated",
                    "values": ["/^x([2-9]|[1-9][0-9]+)?$/", ""],
                    "editType": "plot",
                },
            ),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
| [
"noreply@github.com"
] | OGVGdev.noreply@github.com |
f108a254b580b424e84bb5fd810f6968e00aa74f | b31e7898aa5131125f243eaff973049b17e08512 | /.venv/lib/python3.10/site-packages/dill/tests/__main__.py | b68e86778239f5f5f2118187bfce74c1a380518a | [] | no_license | ramsred/MyProjects | f2978eeda3d73421daf0da9f2d012caef6c3ccda | a7f90ef1ecfbc7517be61e71286bd14405985de5 | refs/heads/master | 2023-07-09T03:19:17.683705 | 2023-07-02T19:30:19 | 2023-07-02T19:30:19 | 71,980,729 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 899 | py | #!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 2018-2022 The Uncertainty Quantification Foundation.
# License: 3-clause BSD. The full license text is available at:
# - https://github.com/uqfoundation/dill/blob/master/LICENSE
import glob
import os
import sys
import subprocess as sp
python = sys.executable  # interpreter used to launch each test script
try:
    import pox
    # Prefer the python executable resolved by pox when it is installed.
    python = pox.which_python(version=True) or python
except ImportError:
    pass
shell = sys.platform[:3] == 'win'  # Popen is given shell=True on Windows (see __main__ below)
suite = os.path.dirname(__file__) or os.path.curdir
tests = glob.glob(suite + os.path.sep + 'test_*.py')  # discover sibling test scripts
if __name__ == '__main__':
    # Run each test script in a child interpreter; print pytest-style
    # progress ('.' pass, 'F' fail) and exit non-zero if any failed.
    any_failed = 0
    for script in tests:
        returncode = sp.Popen([python, script], shell=shell).wait()
        if returncode:
            any_failed = 1
            print('F', end='', flush=True)
        else:
            print('.', end='', flush=True)
    print('')
    exit(any_failed)
| [
"venkataramireddy534@gmail.com"
] | venkataramireddy534@gmail.com |
f3b2560ba77a4984ceabfcf27511aa0776e95190 | d735b8354e06eb26aa5ed0ac25ebf96bdd8d67b6 | /python16/day1-21/day003 字符串/07 字符串常用操作2.py | 352270ab025dd6ec5f426bbad105b2152683d6ba | [] | no_license | cn5036518/xq_py | e004766e6b2582ba37d7335320ed6b42f563c46c | ac932dc7fcb89a7a7faf8bda80791743755fd557 | refs/heads/master | 2021-07-15T18:44:19.244025 | 2020-09-12T09:38:25 | 2020-09-12T09:38:25 | 208,355,433 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,272 | py | #!/usr/bin/env python
#-*- coding:utf-8 -*-
#五 条件判断
# #1 判断是否由字母和数字组成
# s = "abc123"
# s2 =s.isalnum()
# print(s2) #True
#
# s = "abc123一二壹"
# s2 =s.isalnum() #注意:一二 壹 isalnum()也认识
# print(s2) #True
#
# #2 判断是否是字母组成
# s ="abc"
# s2 =s.isalpha()
# print(s2) #True
#
# s ="abc "
# s2 =s.isalpha() #多了一个空格
# print(s2) #False
#3判断是否是数字组成(整数\十进制整数\中文数字-一二壹等)
# s = "123"
# s2 =s.isdigit() #判断整数 数字 #重点 判断是否由数字组成
# print(s2) #True
#
# s = "123.2"
# s2 =s.isdigit() #判断整数 数字
# print(s2) #False
# s = "1.5"
# s2 =s.isdecimal() #判断十进制数字,而不是判断小数
# print(s2) #False
#
# s = "145"
# s2 =s.isdecimal() #判断十进制数字,而不是判断小数
# print(s2) #True
# s ="一二壹123"
# s2 =s.isnumeric() #认识中文 一二壹
# print(s2) #True
# s ="一二壹123两"
# s2 =s.isnumeric() #认识中文 一二壹 注意:两 isnumeric认为不是数字
# print(s2) #False
##判断数字的应用场景:购物车,用户在输入菜单的时候,必须保证用户输入的是数字,即可以用上isdigit()
#课上练习:用算法判断某一个字符串是否是小数
s = "-123.12"
"""
思路:
1、先去掉-,用替换-replace
2、先判断是否是整数 -isdigit()
3、如果不是整数的话
1、计算.出现的次数-count
2、判断 如果.出现的次数是1且它不是出现在最前面--startswith(),也不是出现在最后面--endswith()
就是小数
3、否则,就不是小数
注意点:
1、判断是否是整数时候,是无法判断负号-的,所以要用replace先去掉负号
"""
def isfloat(s):
    """Classify the numeric string *s*: integer, decimal, or neither.

    The verdict is printed, not returned. The minus sign is stripped
    before the integer test because str.isdigit() rejects '-'.
    """
    digits_only = s.replace("-", "")
    if digits_only.isdigit():
        print("%s是整数" % s)
        return
    # A decimal has exactly one '.' that is neither first nor last char.
    if s.count(".") == 1 and not s.startswith(".") and not s.endswith("."):
        print("%s是小数" % s)
    else:
        print("%s不是小数" % s)
s="-123.99" #注意:这里的数必须是字符串类型,才能判断,最后可以用int float转换成数字
isfloat(s) #-123.99是小数
#六 计算字符串的长度
s = "dfhdhafk"
print(len(s)) #8 内置函数 和print()的写法类型 不是s.函数名()的写法
#七 可迭代
"""
可迭代的概念:可以一个一个往外取值的对象
1、字符串就是可迭代的(列表、字典都是可迭代的) 可以通过索引号的递增来取值
2、数字就不是可迭代的
"""
s = "朱元璋朱棣"
#1while取出字符串的每个字符--nok
count = 0
while count<len(s):
print(s[count]) #这里的count是索引号 可迭代对象(字符串、列表等):可以通过索引号的递增来取值
count+=1
print("----------1")
# #2for取出字符串的每个字符 (可迭代对象-字符串、列表等可以直接用for循环进行遍历,取出其中的元素)
for i in s: #把可迭代对象的每一个元素,每循环一次,都分别赋值给前面的变量i(方便可迭代对象的遍历)
# for 变量 in 可迭代对象
pass
print(i)
"""
in的两种用法
1、不在for中,是判断xxx是非在出现在str中(判断子字符串)--例子:判断敏感词-广告法
2、在for中,是把可迭代对象(字符串、列表等)的每一个元素取出来,赋值给前面的变量i
4. for循环
for 变量 in 可迭代对象:
循环体(break, continue)
else:
当循环正常结束的时候执行else(如果break,就不会执行else)
"""
#计算在字符串串"I am sylar, I'm 14 years old, I have 2 dogs!" 数字的个数
s1 = "I am sylar, I'm 14 years old, I have 2 dogs!"
count=0
for i in s1:
if i.isdigit():
# print(i)
count+=1
print(count) #统计字符串中有多少个数字
# for i in 10: #TypeError: 'int' object is not iterable
# #因为整数10不是可迭代的类型
# print(i)
for i in "10": #这里的“10”是字符串,可迭代类型
print(i)
| [
"wangtongpei@meicai.cn"
] | wangtongpei@meicai.cn |
b557056f5ed14dcda81056ce8e5cc36a59a3db25 | 35117d0c9b33e3591115b921de3bf781d6dd0dca | /chat/consumers.py | 8e97a9252c5687b11467a50d63459d6041a6e65d | [] | no_license | cnbillow/webim | f65197343aa41eebc9eaf8d4abcd7e349fc0433f | 6320580ca742754430162a4ce8be61d065b45d70 | refs/heads/master | 2020-04-07T15:38:26.708529 | 2018-12-03T06:46:56 | 2018-12-03T06:46:56 | 158,494,142 | 0 | 0 | null | 2018-11-21T05:12:59 | 2018-11-21T05:12:58 | null | UTF-8 | Python | false | false | 2,668 | py | # chat/consumers.py
from channels.generic.websocket import AsyncWebsocketConsumer
from asgiref.sync import async_to_sync
from channels.layers import get_channel_layer
from .models import IMGroup
import json
class ChatConsumer(AsyncWebsocketConsumer):
    """Per-connection WebSocket consumer relaying chat messages via groups."""
    # Triggered once the websocket connection is established.
    # self.scope is roughly analogous to Django's self.request.
    # Pull the room_name field from the URL; the variable name is the one
    # configured in the routing.
    async def connect(self):
        # print(self.scope['url_route'])
        self.room_name = self.scope['url_route']['kwargs']['room_name']
        self.room_group_name = self.room_name
        # Join room group
        await self.channel_layer.group_add(
            self.room_group_name,
            self.channel_name
        )
        # Notify friends that this user came online: find all of the
        # user's friends (via the friend groups) and send each a
        # notification.
        # groups = Group.objects.filter(owner=self.room_group_name)
        # for item in groups:
        #     for friend in item.group_members.all():
        #         channel_publish(friend.id, {'channel_type': 'friend_on', 'user_id': self.room_group_name})
        await self.accept()
    # Triggered when the connection is closed.
    async def disconnect(self, close_code):
        # Leave room group
        # groups = Group.objects.filter(owner=self.room_group_name)
        # for item in groups:
        #     for friend in item.group_members.all():
        #         channel_publish(friend.id, {'channel_type': 'friend_off', 'user_id': self.room_group_name})
        await self.channel_layer.group_discard(
            self.room_group_name,
            self.channel_name
        )
    # Receive message from WebSocket
    # Called whenever the front end sends a message over the socket.
    async def receive(self, text_data):
        text_data_json = json.loads(text_data)
        message = text_data_json['message']
        # Send message to room group
        # The 'type' must correspond to a handler method on this class;
        # Channels rewrites '.' (and keeps '_') as underscores, so
        # 'chat.message' dispatches to chat_message below.
        await self.channel_layer.group_send(
            self.room_group_name,
            {
                'type': 'chat.message',
                'message': message
            }
        )
    # Receive message from room group
    async def chat_message(self, event):
        message = event['message']
        # Send message to WebSocket
        await self.send(text_data=json.dumps({
            'message': message
        }))
# channels发送通知到页面
channel_layer = get_channel_layer()
def channel_publish(topic, content):
    """Send *content* to the channel-layer group *topic* from synchronous code.

    The 'type' key must match a handler method on the consumer
    (chat_message), and *topic* is the consumer's room_group_name.
    """
    # The previous ``try: ... except Exception as e: raise e`` wrapper was a
    # no-op (it re-raised the very same exception), so it has been removed;
    # any error still propagates unchanged to the caller.
    async_to_sync(channel_layer.group_send)(
        topic,
        {
            'type': 'chat.message',
            'message': content,
        }
    )
"js_huang@foxmail.com"
] | js_huang@foxmail.com |
253a0d5a8d540228170ac6262fe99d0a839bb0b5 | 489da428bc0e1ab8f5117c0f8ba5ddb7aff05360 | /scripts/motors.py | a3e07bacd50b1dce8373510020926d7be4a24904 | [
"BSD-3-Clause"
] | permissive | norihisayamada/pimouse_ros | 4f77e769b7ac9cbfc4af6e703764af1d2df56b30 | 3b07880a6ceb584d92cf640c1a38864130d44189 | refs/heads/master | 2020-04-17T03:03:49.424738 | 2019-02-23T11:05:38 | 2019-02-23T11:05:38 | 166,164,916 | 1 | 0 | BSD-3-Clause | 2019-01-17T05:18:41 | 2019-01-17T05:18:41 | null | UTF-8 | Python | false | false | 3,430 | py | #!/usr/bin/env python
#encoding: utf8
import sys, rospy, math
from pimouse_ros.msg import MotorFreqs
from geometry_msgs.msg import Twist
from std_srvs.srv import Trigger, TriggerResponse
from pimouse_ros.srv import TimedMotion #追加
class Motor():
    """ROS node class driving the Raspberry Pi Mouse motor device files."""

    def __init__(self):
        # Start with the motors disabled (writes "0" to the enable device);
        # exit immediately if the device file cannot be written.
        if not self.set_power(False): sys.exit(1)
        # Also disable the motors (default onoff=False) on node shutdown.
        rospy.on_shutdown(self.set_power)
        self.sub_raw = rospy.Subscriber('motor_raw', MotorFreqs, self.callback_raw_freq)
        self.sub_cmd_vel = rospy.Subscriber('cmd_vel', Twist, self.callback_cmd_vel)
        self.srv_on = rospy.Service('motor_on', Trigger, self.callback_on)
        self.srv_off = rospy.Service('motor_off', Trigger, self.callback_off)
        self.srv_tm = rospy.Service('timed_motion', TimedMotion, self.callback_tm)
        self.last_time = rospy.Time.now()
        self.using_cmd_vel = False

    def set_power(self, onoff=False):
        """Write the motor-enable device file; return True on success."""
        en = "/dev/rtmotoren0"
        try:
            with open(en, 'w') as f:
                f.write("1\n" if onoff else "0\n")
            self.is_on = onoff
            return True
        # Narrowed from a bare ``except:`` so Ctrl-C / SystemExit propagate.
        except Exception:
            rospy.logerr("cannot write to " + en)
            return False

    def set_raw_freq(self, left_hz, right_hz):
        """Write raw frequencies (rounded to int) to both motor devices."""
        if not self.is_on:
            rospy.logerr("not enpowered")
            return
        try:
            with open("/dev/rtmotor_raw_l0", 'w') as lf,\
                 open("/dev/rtmotor_raw_r0", 'w') as rf:
                lf.write(str(int(round(left_hz))) + "\n")
                rf.write(str(int(round(right_hz))) + "\n")
        except Exception:
            rospy.logerr("cannot write to rtmotor_raw_*")

    def callback_raw_freq(self, message):
        """motor_raw topic: forward the requested frequencies unchanged."""
        self.set_raw_freq(message.left_hz, message.right_hz)

    def callback_cmd_vel(self, message):
        """cmd_vel topic: scale linear/angular velocity to wheel frequencies
        (hardware-specific constants) and remember the message time for the
        watchdog in __main__."""
        forward_hz = 80000.0*message.linear.x/(9*math.pi)
        rot_hz = 400.0*message.angular.z/math.pi
        self.set_raw_freq(forward_hz-rot_hz, forward_hz+rot_hz)
        self.using_cmd_vel = True
        self.last_time = rospy.Time.now()

    def onoff_response(self, onoff):
        """Build the Trigger response shared by motor_on / motor_off."""
        d = TriggerResponse()
        d.success = self.set_power(onoff)
        d.message = "ON" if self.is_on else "OFF"
        return d

    def callback_on(self, message): return self.onoff_response(True)

    def callback_off(self, message): return self.onoff_response(False)

    def callback_tm(self, message):
        """timed_motion service: run both motors for duration_ms; return
        False if the motors are off or the device cannot be written."""
        if not self.is_on:
            rospy.logerr("not enpowered")
            return False
        dev = "/dev/rtmotor0"
        try:
            with open(dev, 'w') as f:
                f.write("%d %d %d\n" %
                        (message.left_hz, message.right_hz, message.duration_ms))
        except Exception:
            rospy.logerr("cannot write to " + dev)
            return False
        return True
if __name__ == '__main__':
    rospy.init_node('motors')
    m = Motor()
    rate = rospy.Rate(10)
    # 10 Hz watchdog loop: if cmd_vel was in use but no message has
    # arrived for at least 1 second, stop the motors.
    while not rospy.is_shutdown():
        if m.using_cmd_vel and rospy.Time.now().to_sec() - m.last_time.to_sec() >= 1.0:
            m.set_raw_freq(0,0)
            m.using_cmd_vel = False
        rate.sleep()
# Copyright 2016 Ryuichi Ueda
# Released under the BSD License.
# To make line numbers be identical with the book, this statement is written here. Don't move it to the header.
| [
"ryuichiueda@gmail.com"
] | ryuichiueda@gmail.com |
e96ed567a6e93e60ecbb430c97f1b152028d82cc | 4502834012bd50135569a820a502c2d965b0a1df | /wdom/themes/vital.py | 05f72b22f634c0e9e334e8dcfe9d7b66955ab40a | [
"MIT"
] | permissive | miyakogi/wdom | 9db7fccf7402fa2e2fc97f53c2a42e4aa2b8633a | c7cd8b3428ca154af6fb1ecb6c7d2f0e17551802 | refs/heads/master | 2020-04-04T22:07:12.970584 | 2018-03-05T15:32:55 | 2018-03-05T15:32:55 | 49,849,994 | 72 | 14 | null | null | null | null | UTF-8 | Python | false | false | 2,152 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# flake8: noqa
from wdom.tag import NewTagClass as NewTag
from wdom.themes import *
name = 'Vital'
project_url = 'https://vitalcss.com/'
project_repository = 'https://github.com/doximity/vital'
license = 'Apache 2.0'
license_url = 'https://github.com/doximity/vital/blob/master/LICENSE.md'
css_files = [
'//cdn.rawgit.com/doximity/vital/v2.2.1/dist/css/vital.min.css',
]
Button = NewTag('Button', bases=Button, class_='btn')
DefaultButton = NewTag('DefaultButton', 'button', Button, class_='solid', is_='default-button')
PrimaryButton = NewTag('PrimaryButton', 'button', DefaultButton, class_='blue', is_='primary-button')
SecondaryButton = NewTag('SecondaryButton', 'button', Button, class_='blue', is_='secondary-button')
SuccessButton = NewTag('SuccessButton', 'button', DefaultButton, class_='green', is_='success-button')
InfoButton = NewTag('InfoButton', 'button', Button, class_='blue', is_='info-button')
WarningButton = NewTag('WarningButton', 'button', DefaultButton, class_='orange', is_='warning-button')
DangerButton = NewTag('DangerButton', 'button', DefaultButton, class_='red', is_='danger-button')
ErrorButton = NewTag('ErrorButton', 'button', DefaultButton, class_='red', is_='error-button')
LinkButton = NewTag('LinkButton', 'button', Button, class_='no-outline blue', is_='link-button')
Ol = NewTag('Ol', 'ol', class_='list')
Ul = NewTag('Ul', 'Ul', class_='list')
Col = NewTag('Col', 'div', Col, class_='col')
# Col1 = NewTag('Col1', 'div', Col1, class_='col-1-12')
# Col2 = NewTag('Col2', 'div', Col2, class_='col-1-6')
Col3 = NewTag('Col3', 'div', Col3, class_='col-1-4')
Col4 = NewTag('Col4', 'div', Col4, class_='col-1-3')
# Col5 = NewTag('Col5', 'div', Col5, class_='col-5-12')
Col6 = NewTag('Col6', 'div', Col6, class_='col-1-2')
# Col7 = NewTag('Col7', 'div', Col7, class_='col-7-12')
Col8 = NewTag('Col8', 'div', Col8, class_='col-2-3')
Col9 = NewTag('Col9', 'div', Col9, class_='col-3-4')
# Col10 = NewTag('Col10', 'div', Col10, class_='col-5-6')
# Col11 = NewTag('Col11', 'div', Col11, class_='col-11-12')
# Col12 = NewTag('Col12', 'div', Col12, class_='col-1-1')
| [
"miyako.dev@gmail.com"
] | miyako.dev@gmail.com |
91f2d3d83f90775793a431658c4f158ea7eeeb1b | aea0837dd60da3e3746c5ab0970bde246db9493d | /allapps/profiles/signals.py | 4692cdce5b6e15e132221dfde54eb9745bd88e20 | [] | no_license | anykate/veryacademy-demo | 5f05707b1fd9085ef1a088b3fc02f48dfa0bd897 | 697d9ab6fffd169c80d6b3fcc0e838e4b5ae6739 | refs/heads/master | 2023-03-13T18:52:39.499619 | 2020-07-15T19:45:22 | 2020-07-15T19:45:22 | 279,959,596 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | from django.contrib.auth import get_user_model
from django.db.models.signals import post_save
from django.dispatch import receiver
from ..profiles.models import UserProfile
@receiver(post_save, sender=get_user_model())
def create_user_profile(sender, instance, created, **kwargs):
if created:
UserProfile.objects.create(user=instance)
@receiver(post_save, sender=get_user_model())
def save_user_profile(sender, instance, **kwargs):
instance.profiles.save()
| [
"aryamane.aniket@gmail.com"
] | aryamane.aniket@gmail.com |
37550aa18fc249a080093d9df9dddf1746e817c1 | d721258b53f0f44b1010cb8e8efac8e2a5c96c26 | /adventure/migrations/0015_auto_20160503_2339.py | adf3848226a6d63e10f367b4eadfaf5402adac09 | [
"LicenseRef-scancode-proprietary-license",
"MIT"
] | permissive | kdechant/eamon | a6662285f51a6cad5797bb9be92ca709ae36921c | 080a43aa80c3a1605c402e68616545a8e9c7975c | refs/heads/master | 2023-05-24T08:20:18.551604 | 2022-08-14T10:27:01 | 2023-04-08T07:31:45 | 49,559,304 | 28 | 7 | MIT | 2023-03-14T21:09:55 | 2016-01-13T08:07:28 | TypeScript | UTF-8 | Python | false | false | 1,252 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-05-04 06:39
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('adventure', '0014_auto_20160419_2324'),
]
operations = [
migrations.AlterField(
model_name='player',
name='gender',
field=models.CharField(choices=[('m', 'Male'), ('f', 'Female')], max_length=6),
),
migrations.AlterField(
model_name='playerartifact',
name='dice',
field=models.IntegerField(default=1, null=True),
),
migrations.AlterField(
model_name='playerartifact',
name='odds',
field=models.IntegerField(default=0, null=True),
),
migrations.AlterField(
model_name='playerartifact',
name='sides',
field=models.IntegerField(default=1, null=True),
),
migrations.AlterField(
model_name='playerartifact',
name='weapon_type',
field=models.IntegerField(choices=[(1, 'Axe'), (2, 'Bow'), (3, 'Club'), (4, 'Spear'), (5, 'Sword')], default=0, null=True),
),
]
| [
"keith.dechant@gmail.com"
] | keith.dechant@gmail.com |
55f755b1f5a9ff18d933ab5ec6fe7a4c62a49b2c | 7e01c039f2427d434a4ef44a1b9dc0ea21db65ba | /venv/lib/python3.8/site-packages/django/contrib/gis/db/backends/postgis/adapter.py | 8f456ff7c0f6bac9b3fc69a9235e9221b9a73623 | [] | no_license | dmfranz/Spike-exercise | 09f8051163d2a63dfbc3f75da2de0a1bbbbb122d | 83971e95a72d504f629778fece2cdfb953e5d08b | refs/heads/main | 2023-08-23T04:18:43.934471 | 2021-10-11T04:54:28 | 2021-10-11T04:54:28 | 413,568,735 | 0 | 1 | null | 2021-10-11T04:36:22 | 2021-10-04T20:10:01 | Python | UTF-8 | Python | false | false | 2,257 | py | """
This object provides quoting for GEOS geometries into PostgreSQL/PostGIS.
"""
from psycopg2 import Binary
from psycopg2.extensions import ISQLQuote
from django.contrib.gis.db.backends.postgis.pgraster import to_pgraster
from django.contrib.gis.geos import GEOSGeometry
class PostGISAdapter:
def __init__(self, obj, geography=False):
"""
Initialize on the spatial object.
"""
self.is_geometry = isinstance(obj, (GEOSGeometry, PostGISAdapter))
# Getting the WKB (in string form, to allow easy pickling of
# the adaptor) and the SRID from the geometry or raster.
if self.is_geometry:
self.ewkb = bytes(obj.ewkb)
self._adapter = Binary(self.ewkb)
else:
self.ewkb = to_pgraster(obj)
self.srid = obj.srid
self.geography = geography
def __conform__(self, proto):
"""Does the given protocol conform to what Psycopg2 expects?"""
if proto == ISQLQuote:
return self
else:
raise Exception('Error implementing psycopg2 protocol. Is psycopg2 installed?')
def __eq__(self, other):
return isinstance(other, PostGISAdapter) and self.ewkb == other.ewkb
def __hash__(self):
return hash(self.ewkb)
def __str__(self):
return self.getquoted()
@classmethod
def _fix_polygon(cls, poly):
return poly
def prepare(self, conn):
"""
This method allows escaping the binary in the style required by the
server's `standard_conforming_string` setting.
"""
if self.is_geometry:
self._adapter.prepare(conn)
def getquoted(self):
"""
Return a properly quoted string for use in PostgreSQL/PostGIS.
"""
if self.is_geometry:
# Psycopg will figure out whether to use E'\\000' or '\000'.
return '%s(%s)' % (
'ST_GeogFromWKB' if self.geography else 'ST_GeomFromEWKB',
self._adapter.getquoted().decode()
)
else:
# For rasters, add explicit type cast to WKB string.
return "'%s'::raster" % self.ewkb
| [
"marmara@wisc.edu"
] | marmara@wisc.edu |
32cd827945206d96c0bf02543fc24863d50c006a | 7329f788dc8e48db398cee81da7ca9621d681878 | /mljar/client/result.py | 5d000930c285a3cba9dfb7dbb6912c5c7b5fc50d | [
"Apache-2.0"
] | permissive | armandoayudame/mljar-api-python | 87c810168d348bcc988aa816f782e8f32f6c2281 | b4843a59bb22060707da569b4aa569ab40669421 | refs/heads/master | 2020-03-09T06:33:33.006071 | 2017-07-20T15:22:34 | 2017-07-20T15:22:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 767 | py | from base import MljarHttpClient
from ..model.result import Result
from ..exceptions import NotFoundException
class ResultClient(MljarHttpClient):
'''
Client to interact with MLJAR results (models).
'''
def __init__(self, project_hid):
self.url = "/results/"
self.project_hid = project_hid
super(ResultClient, self).__init__()
def get_results(self, experiment_hid = None):
'''
List all models.
'''
data = {'project_id': self.project_hid}
if experiment_hid is not None:
data['experiment_id'] = experiment_hid
response = self.request("POST", self.url, data = data)
results_dict = response.json()
return [Result.from_dict(r) for r in results_dict]
| [
"pplonski86@gmail.com"
] | pplonski86@gmail.com |
e99861aa467364b0490888232da7c8bcbacdb2f1 | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-mgmt-compute/azure/mgmt/compute/v2018_10_01/models/diff_disk_settings.py | ac0d894264c9c6e558e3b2eb6af585ab1412167d | [
"MIT"
] | permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 1,184 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class DiffDiskSettings(Model):
"""Describes the parameters of ephemeral disk settings that can be specified
for operating system disk. <br><br> NOTE: The ephemeral disk settings can
only be specified for managed disk.
:param option: Specifies the ephemeral disk settings for operating system
disk. Possible values include: 'Local'
:type option: str or
~azure.mgmt.compute.v2018_10_01.models.DiffDiskOptions
"""
_attribute_map = {
'option': {'key': 'option', 'type': 'str'},
}
def __init__(self, **kwargs):
super(DiffDiskSettings, self).__init__(**kwargs)
self.option = kwargs.get('option', None)
| [
"lmazuel@microsoft.com"
] | lmazuel@microsoft.com |
4b8702aa921be40726e62a3312c0bce0e233541d | 41a1b3d3491037000de0735823c3d8720f746af0 | /Lib/fontgoggles/mac/sliderGroup.py | ec445810098eb93da099699a4d8f63883e05ffa7 | [
"Apache-2.0"
] | permissive | chrissimpkins/fontgoggles | a8ea5391605c0197e85db211d81b1e290a1f6909 | 814eef1633e017da6fb9a48441e5860d4b5769a3 | refs/heads/master | 2023-03-21T02:22:11.102996 | 2021-03-26T15:45:40 | 2021-03-26T15:45:40 | 244,532,440 | 1 | 0 | Apache-2.0 | 2020-03-03T03:28:11 | 2020-03-03T03:28:10 | null | UTF-8 | Python | false | false | 7,019 | py | import AppKit
from vanilla import Button, EditText, Group, Slider, TextBox, VanillaBaseObject
from fontgoggles.misc.properties import weakrefCallbackProperty
class SliderGroup(Group):
_callback = weakrefCallbackProperty()
def __init__(self, width, sliderInfo, continuous=True, callback=None):
super().__init__((0, 0, width, 0))
self._callback = callback
self._continuous = continuous
self._tags = []
self.setSliderInfo(sliderInfo)
def _breakCycles(self):
self._callback = None
super()._breakCycles()
def setSliderInfo(self, sliderInfo):
savedState = self.get()
# clear all subviews
for attr, value in list(self.__dict__.items()):
if isinstance(value, VanillaBaseObject):
delattr(self, attr)
margin = 10
y = margin
self._tags = []
self._defaultValues = {}
for tag, (label, minValue, defaultValue, maxValue) in sliderInfo.items():
self._tags.append(tag)
self._defaultValues[tag] = defaultValue
attrName = f"slider_{tag}"
slider = SliderPlus((margin, y, -margin, 40), label, minValue, defaultValue, maxValue,
continuous=self._continuous, callback=self._sliderChanged)
setattr(self, attrName, slider)
y += 50
self.resetAllButton = Button((10, y, 120, 25), "Reset all axes", self._resetAllButtonCallback)
self.resetAllButton.enable(False)
y += 35
posSize = (0, 0, self.getPosSize()[2], y)
self.setPosSize(posSize)
self._updateState(savedState)
def _sliderChanged(self, sender):
self.resetAllButton.enable(True)
callCallback(self._callback, self)
def _resetAllButtonCallback(self, sender):
self.resetAllButton.enable(False)
for tag in self._tags:
attrName = f"slider_{tag}"
slider = getattr(self, attrName)
slider.set(self._defaultValues[tag])
callCallback(self._callback, self)
def get(self):
state = {}
for tag in self._tags:
attrName = f"slider_{tag}"
slider = getattr(self, attrName)
value = slider.get()
if value is not None:
if len(self._defaultValues[tag]) != 1 or value not in self._defaultValues[tag]:
state[tag] = value
return state
def _updateState(self, state):
for tag, value in state.items():
attrName = f"slider_{tag}"
slider = getattr(self, attrName, None)
if slider is not None:
slider.set(value)
def set(self, state):
if state:
self.resetAllButton.enable(True)
for tag in self._tags:
attrName = f"slider_{tag}"
slider = getattr(self, attrName)
value = state.get(tag)
if value is None:
value = self._defaultValues[tag]
slider.set(value)
class SliderPlus(Group):
_callback = weakrefCallbackProperty()
def __init__(self, posSize, label, minValue, value, maxValue, continuous=True, callback=None):
super().__init__(posSize)
self._callback = callback
self.label = TextBox((0, 0, 0, 20), label)
self.slider = Slider((0, 18, -60, 20), value=minValue, minValue=minValue, maxValue=maxValue,
continuous=continuous, callback=self._sliderCallback)
self.editField = EditText((-50, 16, 0, 24), "", continuous=False, callback=self._editFieldCallback)
self.editField._nsObject.setAlignment_(AppKit.NSRightTextAlignment)
self._setSliderFromValue(value)
self._setEditFieldFromValue(value)
def _breakCycles(self):
self._callback = None
super()._breakCycles()
def _sliderCallback(self, sender):
value = sender.get()
self._setEditFieldFromValue(value)
callCallback(self._callback, self)
def _editFieldCallback(self, sender):
value = sender.get()
if not value:
# self._setSliderFromValue(None)
callCallback(self._callback, self)
return
value = value.replace(",", ".")
try:
f = float(value)
except ValueError:
pass
else:
self.slider.set(f)
sliderValue = self.slider.get()
if sliderValue != f:
self._setEditFieldFromValue(sliderValue)
callCallback(self._callback, self)
def _setSliderFromValue(self, value):
if isinstance(value, set):
value = sum(value) / len(value)
elif value is None:
minValue = self.slider._nsObject.minValue()
maxValue = self.slider._nsObject.maxValue()
value = (minValue + maxValue) / 2
self.slider.set(value)
def _setEditFieldFromValue(self, value):
if isinstance(value, set):
if len(value) == 1:
value = next(iter(value))
else:
value = None
if value is None:
s = ""
else:
if int(value) == value:
s = str(int(value))
else:
s = f"{value:.1f}"
self.editField.set(s)
def get(self):
if not self.editField.get():
return None
else:
return self.slider.get()
def set(self, value):
self._setSliderFromValue(value)
self._setEditFieldFromValue(value)
def callCallback(callback, sender):
if callback is not None:
callback(sender)
if __name__ == "__main__":
from random import random
from vanilla import Window
class SliderTest:
def __init__(self):
self.w = Window((300, 400), "SliderTest", autosaveName="SliderTestttt")
# self.w.slider1 = SliderPlus((10, 10, -10, 50), "Slider 1", 0, 50, 100)
# self.w.slider2 = SliderPlus((10, 60, -10, 50), "Slider 2", 0, 50, 100)
info = [("abcd", "The alphabet"),
("xyz ", "The alphabet part 2"),
("wdth", "Width"),
("wght", "Weight")]
self.sliderInfo = {}
for tag, label in info:
self.sliderInfo[tag] = (label, 0, 50, 100)
self.w.sliderGroup = SliderGroup(300, self.sliderInfo, continuous=True, callback=self.sliderGroupCallback)
self.w.mutateButton = Button((10, -40, 80, 20), "Mutate", callback=self.mutateCallback)
self.w.open()
def sliderGroupCallback(self, sender):
print(sender.get())
def mutateCallback(self, sender):
state = {}
for tag, (label, minValue, defaultValue, maxValue) in self.sliderInfo.items():
v = minValue + (maxValue - minValue) * random()
state[tag] = v
self.w.sliderGroup.set(state)
t = SliderTest()
| [
"justvanrossum@gmail.com"
] | justvanrossum@gmail.com |
72e698fc4e1fed404313ed161df45403fb8971a5 | 6411acb3828fec8cc79f6f2630a4d99e8352a9aa | /326.py | f29ee1980db2b5d3ee63d806ebccad065e793aa1 | [] | no_license | zhouliuling/Leetcode_Task | 95f55a3ca33b561a130fc15272e513d9af2c0317 | 0230d31351a4093e8ae6be5fe0c175f3f41e08ac | refs/heads/master | 2020-05-25T18:44:59.377953 | 2019-09-12T02:14:22 | 2019-09-12T02:14:22 | 187,935,297 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | ## 3的幂 超时
## 循环
class Solution(object):
def isPowerOfThree(self, n):
"""
:type n: int
:rtype: bool
"""
if n == 0:
return False
while n >= 3:
if n % 3 == 0 and n > 3:
n = n/3
if n == 3:
break
if n == 3:return True
else:return False
| [
"noreply@github.com"
] | zhouliuling.noreply@github.com |
16e8e3d398736c6e70c7d28c21ad2a6de16d3a9a | dae4ab4882080344e5f505def7e2e59e0ed888b4 | /polyaxon/scheduler/spawners/horovod_spawner.py | a7b8b56fea439cfa513c771759a4f92ac18a4103 | [
"MPL-2.0"
] | permissive | vfdev-5/polyaxon | 8c3945604e8eaa25ba8b3a39ed0838d0b9f39a28 | 3e1511a993dc1a03e0a0827de0357f4adcc0015f | refs/heads/master | 2021-07-09T22:27:23.272591 | 2018-11-01T23:44:44 | 2018-11-01T23:44:44 | 154,320,634 | 0 | 0 | MIT | 2018-10-23T12:01:34 | 2018-10-23T12:01:33 | null | UTF-8 | Python | false | false | 3,769 | py | from scheduler.spawners.experiment_spawner import ExperimentSpawner
from schemas.environments import HorovodClusterConfig
from schemas.specifications import HorovodSpecification
from schemas.tasks import TaskType
class HorovodSpawner(ExperimentSpawner):
MASTER_SERVICE = True
WORKER_SERVICE = True
@property
def resources(self):
cluster, is_distributed, = self.spec.cluster_def
worker_resources = HorovodSpecification.get_worker_resources(
environment=self.spec.environment,
cluster=cluster,
is_distributed=is_distributed
)
return {
TaskType.MASTER: {0: self.spec.master_resources},
TaskType.WORKER: worker_resources,
}
@property
def node_selectors(self):
cluster, is_distributed, = self.spec.cluster_def
worker_node_selectors = HorovodSpecification.get_worker_node_selectors(
environment=self.spec.environment,
cluster=cluster,
is_distributed=is_distributed
)
return {
TaskType.MASTER: {0: self.spec.master_node_selector},
TaskType.WORKER: worker_node_selectors,
}
@property
def affinities(self):
cluster, is_distributed, = self.spec.cluster_def
worker_affinities = HorovodSpecification.get_worker_affinities(
environment=self.spec.environment,
cluster=cluster,
is_distributed=is_distributed
)
return {
TaskType.MASTER: {0: self.spec.master_affinity},
TaskType.WORKER: worker_affinities,
}
@property
def tolerations(self):
cluster, is_distributed, = self.spec.cluster_def
worker_tolerations = HorovodSpecification.get_worker_tolerations(
environment=self.spec.environment,
cluster=cluster,
is_distributed=is_distributed
)
return {
TaskType.MASTER: {0: self.spec.master_affinity},
TaskType.WORKER: worker_tolerations,
}
def get_resources(self, task_type, task_idx):
return self.resources.get(task_type, {}).get(task_idx)
def get_node_selector(self, task_type, task_idx):
return self.node_selectors.get(task_type, {}).get(task_idx)
def get_affinity(self, task_type, task_idx):
return self.affinities.get(task_type, {}).get(task_idx)
def get_tolerations(self, task_type, task_idx):
return self.tolerations.get(task_type, {}).get(task_idx)
def get_n_pods(self, task_type):
return self.spec.cluster_def[0].get(task_type, 0)
def start_experiment(self):
experiment = super().start_experiment()
experiment[TaskType.WORKER] = self.create_multi_jobs(task_type=TaskType.WORKER,
add_service=self.WORKER_SERVICE)
return experiment
def stop_experiment(self):
deleted = super().stop_experiment()
if not self.delete_multi_jobs(task_type=TaskType.WORKER, has_service=self.WORKER_SERVICE):
deleted = False
return deleted
def get_cluster(self):
cluster_def, _ = self.spec.cluster_def
job_name = self.pod_manager.get_job_name(task_type=TaskType.MASTER, task_idx=0)
cluster_config = {
TaskType.MASTER: [self._get_pod_address(job_name)]
}
workers = []
for i in range(cluster_def.get(TaskType.WORKER, 0)):
job_name = self.pod_manager.get_job_name(task_type=TaskType.WORKER, task_idx=i)
workers.append(self._get_pod_address(job_name))
cluster_config[TaskType.WORKER] = workers
return HorovodClusterConfig.from_dict(cluster_config).to_dict()
| [
"mouradmourafiq@gmail.com"
] | mouradmourafiq@gmail.com |
80e587f59af889c72ed9b2e941144441a2eda60e | 12123592a54c4f292ed6a8df4bcc0df33e082206 | /py2/pgms/appx/timedeltas.py | a1e94beb0b7ea3d8b5472cc307de9f8b3ec91e80 | [] | no_license | alvinooo/advpython | b44b7322915f832c8dce72fe63ae6ac7c99ef3d4 | df95e06fd7ba11b0d2329f4b113863a9c866fbae | refs/heads/master | 2021-01-23T01:17:22.487514 | 2017-05-30T17:51:47 | 2017-05-30T17:51:47 | 92,860,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | #!/usr/bin/env python
# timedeltas.py - timedelta objects in datetime module
from datetime import timedelta, datetime
td1 = timedelta(days=2, hours=18)
td2 = timedelta(hours=8.5)
td3 = td1 + td2
days = td3.days
hours = td3.seconds / float(3600)
hoursahead = td3.total_seconds() / 3600
print days, hours, hoursahead
dt1 = datetime(2016, 2, 25)
dt2 = datetime(2016, 3, 6)
td4 = dt2 - dt1
print td4.days
now = datetime.today()
print now
print now + timedelta(minutes=10)
###############################################
#
# $ timedeltas.py
# 3 2.5 74.5
# 10
# 2016-07-09 14:34:36.785123
# 2016-07-09 14:44:36.785123
#
| [
"alvin.heng@teradata.com"
] | alvin.heng@teradata.com |
f4c9a708bac850f83274e6b93266d0c857a07a67 | 1fe56144905244643dbbab69819720bc16031657 | /.history/books/admin_20210422174025.py | 974b41eccc123608d1d211f8a7c120fc32c54b59 | [] | no_license | RaghdaMadiane/django | 2052fcdd532f9678fefb034bd60e44f466bd9759 | 6ca3f87f0b72880f071d90968f0a63ea5badcca8 | refs/heads/master | 2023-04-15T17:28:25.939823 | 2021-04-24T22:33:21 | 2021-04-24T22:33:21 | 361,279,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,224 | py | from django.contrib import admin
from .models import Book ,Category ,Tag,Isbn
from .forms import BookForm
from django import forms
from django.core.exceptions import ValidationError
class BookForm(forms.ModelForm):
class Meta:
model = Book
fields='__all__'
def clean_title(self):
title=self.cleaned_data.get("title")
titleLength=len(title)
if titleLength<10:
raise ValidationError("title should be more than 10 chars!")
if titleLength>20:
raise ValidationError("title should be less than 20 chars!")
return title
def clean_category(self):
category=self.cleaned_data.get("category")
catLength=len(category)
if catLength<2:
raise ValidationError("category name length should be more than 2 chars!")
return category
class BookAdmin(admin.ModelAdmin):
form=BookForm
list_filter=("categories",)
class BookInLine(admin.StackedInline):
model=Book
max_num =3
extra = 1
class TagAdmin(admin.ModelAdmin):
inlines=[BookInLine]
admin.site.register(Book,BookAdmin)
admin.site.register(Category)
admin.site.register(Isbn)
admin.site.register(Tag,TagAdmin)
| [
"raghdamadiane@gmail.com"
] | raghdamadiane@gmail.com |
f62b38f7fe3ecd7a5044a7c77e8f64f638e862eb | 34e0865fb4915390e77336e81b2c87ec2bf52df6 | /tweet/admin.py | 18778cdb4a53c4545a3921863969b68e8c08ff99 | [] | no_license | HiPiH/local | 3702be6b140fe879188e9623ede27adfc1ce8765 | 6c3bd2c0818c780977c2081ab72906f0166625dd | refs/heads/master | 2021-01-25T04:50:29.096944 | 2011-12-24T08:21:39 | 2011-12-24T08:21:39 | 3,026,345 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 202 | py | # -*- coding: utf-8 -*-
__author__ = 'Aleksey.Novgorodov'
from django.contrib.admin import site,ModelAdmin
from models import TweetWords,TweetLang
site.register(TweetLang)
site.register(TweetWords) | [
"admin@nvk.su"
] | admin@nvk.su |
572ac80b5a8fb2a0418cfa931e3dc7d560f7d0d6 | 127b460b1d540e6f8c3aa90dfc04e8abf84a97ff | /parler/tests/testapp/models.py | 0aa2c9f279bbb4b37ce1077dcc646bfacb091b25 | [
"Apache-2.0"
] | permissive | philippeowagner/django-parler | c10634a863088708644cbe4592bac635553e44a0 | db0ff40760759f15620954994a2e2f8584733de4 | refs/heads/master | 2021-01-09T05:31:38.302665 | 2013-09-25T13:22:07 | 2013-09-25T13:23:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 987 | py | from django.db import models
from parler.fields import TranslatedField
from parler.models import TranslatableModel, TranslatedFields, TranslatedFieldsModel
class ManualModel(TranslatableModel):
shared = models.CharField(max_length=200, default='')
class ManualModelTranslations(TranslatedFieldsModel):
master = models.ForeignKey(ManualModel, related_name='translations')
tr_title = models.CharField(max_length=200)
class SimpleModel(TranslatableModel):
shared = models.CharField(max_length=200, default='')
translations = TranslatedFields(
tr_title = models.CharField(max_length=200)
)
def __unicode__(self):
return self.tr_title
class AnyLanguageModel(TranslatableModel):
shared = models.CharField(max_length=200, default='')
tr_title = TranslatedField(any_language=True)
translations = TranslatedFields(
tr_title = models.CharField(max_length=200)
)
def __unicode__(self):
return self.tr_title
| [
"vdboor@edoburu.nl"
] | vdboor@edoburu.nl |
06c37b6866986287f6f5442dcec9d3ecc5a2d997 | 988c3065d6c475c0a90765b2245de54624325e22 | /venv/bin/pylint | 298cec919a098ab672eeb5f932e9ea3b6880dc57 | [
"MIT"
] | permissive | brayomumo/Instagram | c7e3ec061e4503320f3e1ed78096d032044f2435 | cbe932eac29afda480a0cbdea71a9f1eda01845c | refs/heads/master | 2021-09-09T13:20:07.682824 | 2019-09-02T09:06:47 | 2019-09-02T09:06:47 | 205,555,185 | 0 | 0 | null | 2021-09-08T01:15:00 | 2019-08-31T14:31:57 | Python | UTF-8 | Python | false | false | 263 | #!/home/smoucha/Desktop/projects/instagram/venv/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_pylint
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run_pylint())
| [
"brayomumo5@gmail.com"
] | brayomumo5@gmail.com | |
b938e93bf47ff375e76e067089ecc3cb9b52a355 | 44702094bd2f512cc672ade27e39ca29755f89ac | /projects/chatbot/simple-bot.py | 6b33b0a67a0e789eb8e6fdca5a01e34e990489fd | [
"Python-2.0",
"MIT"
] | permissive | AIMLCamp/python-mini-projects | 20a1c15faa645aa4bd9a24efb3cee3da555201f5 | aaa166bb190d06b2264afc548a29c998a6664a3e | refs/heads/master | 2023-09-02T04:52:00.311157 | 2021-11-22T02:19:12 | 2021-11-22T02:19:12 | 407,899,754 | 0 | 0 | MIT | 2021-11-22T02:19:12 | 2021-09-18T15:38:25 | null | UTF-8 | Python | false | false | 268 | py | import asyncio
from wechaty import Wechaty, Message
async def on_message(msg: Message):
if msg.text() == 'ding':
await msg.say('dong')
async def main():
bot = Wechaty()
bot.on('message', on_message)
await bot.start()
asyncio.run(main())
| [
"1435130236@qq.com"
] | 1435130236@qq.com |
7b1a9539da50bb87e753173c18e83da533c2e5d9 | 46c76c7ca1d9d030606f2e3e95a2a9e6bbad2789 | /args2.py | b2b54870182508e085c37f588246b43d14263d83 | [] | no_license | KayMutale/pythoncourse | be9ff713cffc73c1b9b3c1dd2bdd6d293637ce1e | 985a747ff17133aa533b7a049f83b37fc0fed80e | refs/heads/master | 2023-04-13T07:58:00.993724 | 2021-04-16T14:19:41 | 2021-04-16T14:19:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py |
#!/usr/bin/env python3
import sys
if len(sys.argv) < 2:
arg = input("enter stop|start|restart: ")
else:
arg = sys.argv[1]
out = {
"start" : "starting",
"stop" : "stopping",
"restart" : "stopping\nstarting"
}
print out.get(arg,"usage: stops|start|restart")
| [
"mark@ledge.co.za"
] | mark@ledge.co.za |
dc182a07b673bb672258ea59918ef4ebc350823c | 2a5c0c49319989a24f9c9f18530f109bc48a8df1 | /CursesEnded/SecondYear/PythonAnaconda(BigData)/exam/pycharm/1.py | 34bc5d217826a141d75c77fb83c323b4654f861a | [] | no_license | Kamil-IT/Studies | 0ada6dd92d7ecdbd0a3164c7c80080dd715ce8fc | d70343b2b7818ce303d816443bb15d21e03b42e0 | refs/heads/master | 2022-12-22T01:55:35.047783 | 2022-10-20T18:55:29 | 2022-10-20T18:55:29 | 217,039,987 | 0 | 1 | null | 2022-12-10T06:03:55 | 2019-10-23T11:24:33 | Jupyter Notebook | UTF-8 | Python | false | false | 3,418 | py | from matplotlib import pyplot as plt
from numpy import random
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
def generate_data(quantity, cech_quantity, class_quantity):
result_to_learn = []
class_to_learn = []
for j in range(class_quantity):
for i in range(quantity // class_quantity):
row = random.rand(cech_quantity) + j - 0.5 * j
result_to_learn.append(row)
class_to_learn.append(j)
return result_to_learn, class_to_learn
def generate_data_to_plot_data(generate_data, generated_class):
result = [[] for i in set(generated_class)]
for i in range(len(generate_data)):
result[generated_class[i]].append(generate_data[i])
return result
def plot_generated_data(generate_data, generated_class):
colors = ['r+', 'b+', 'm+', 'y+', 'c+', 'g+']
unique_class = set(generated_class)
colors = [colors[i] for i in range(len(unique_class))]
data_to_plot = generate_data_to_plot_data(generate_data, generated_class)
if len(generate_data[0]) == 2:
fig = plt.figure()
fig.add_subplot(111)
def plot_figure(data, color, label):
plt.plot([i[0] for i in data], [i[1] for i in data], color, label=label)
for i in range(len(colors)):
plot_figure(data_to_plot[i], colors[i], f'class {i}')
plt.legend()
plt.show()
else:
fig = plt.figure()
fig.add_subplot(111, projection='3d')
def plot_figure(data, color, label):
plt.plot([i[0] for i in data], [i[1] for i in data], [i[2] for i in data], color, label=label)
for i in range(len(colors)):
plot_figure(data_to_plot[i], colors[i], f'class {i}')
plt.legend()
plt.show(block=False)
def tp(prediction, classes):
score = 0
for i in range(len(prediction)):
if prediction[i] == 1 and classes[i] == 1:
score += 1
return score
def fp(prediction, classes):
score = 0
for i in range(len(prediction)):
if prediction[i] == 1 and classes[i] == 0:
score += 1
return score
def fn(prediction, classes):
score = 0
for i in range(len(prediction)):
if prediction[i] == 0 and classes[i] == 1:
score += 1
return score
def tn(prediction, classes):
score = 0
for i in range(len(prediction)):
if prediction[i] == 0 and classes[i] == 0:
score += 1
return score
data, classes = generate_data(200, 2, 2)
train_data, test_data, train_class, test_class = train_test_split(data, classes, test_size=0.3)
plot_generated_data(train_data, train_class)
plot_generated_data(test_data, test_class)
clf = SVC()
clf.fit(train_data, train_class)
# Prediction
predicted = clf.predict(test_data)
tn_val = tn(predicted, test_class)
tp_val = tp(predicted, test_class)
fp_val = fp(predicted, test_class)
fn_val = fn(predicted, test_class)
dokladnosc = (tp_val / tn_val) / (tp_val + tn_val + fp_val + fn_val)
precyzja = tp_val / (tp_val + fp_val)
specyficznosc = tn_val / (tn_val + fp_val)
print("Bazowałem na klasyfikatorze SVC")
print("Dokładność " + str(dokladnosc))
print("precyzja " + str(precyzja))
print("specyficznosc " + str(specyficznosc))
fig = plt.figure()
fig.add_subplot(111)
plt.bar(['Dokładność', 'precyzja', 'specyficznosc'], [dokladnosc, precyzja, specyficznosc])
plt.show() | [
"kkwolny@vp.pl"
] | kkwolny@vp.pl |
c3bb33f234a16ed5e919d2aafe6a6b045faef2da | 9080e6e53da365b0d811099e2e992041cf4b5b47 | /0x05-python-exceptions/100-safe_print_integer_err.py | b2fcf082bb0a97016d8da2c794ed362c5a96455f | [] | no_license | benjamesian/holbertonschool-higher_level_programming | 213ad8c39d1fc2ee81843124a46914be166445d3 | 99f00414833757e3b156c148927a858ce38baa0e | refs/heads/master | 2020-07-23T00:52:46.858544 | 2020-02-11T22:03:24 | 2020-02-11T22:03:24 | 207,389,880 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | #!/usr/bin/python3
import sys
def safe_print_integer_err(value):
try:
print("{:d}".format(value))
return True
except Exception as inst:
sys.stderr.write("Exception: {}\n".format(inst))
return False
| [
"808@holbertonschool.com"
] | 808@holbertonschool.com |
f224136fc4dfb21113fa0f2acbc8d724fe512e68 | d61d0498f1dde41ec07878f1ef5da039c8351cff | /examples/DeepQNetwork/DQN.py | 6000f95fa4f600fcf01c625f56f8f83659f700ec | [
"Apache-2.0"
] | permissive | Johnson-yue/tensorpack | ca9a4e0de9d7292f696d634122d65eaa6f34d26d | a0601fb76df87f567e456ba97f3e51aa85ec50e0 | refs/heads/master | 2021-05-07T03:42:56.643626 | 2017-11-14T15:14:47 | 2017-11-14T15:14:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,906 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: DQN.py
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
import os
import argparse
import cv2
import tensorflow as tf
os.environ['TENSORPACK_TRAIN_API'] = 'v2' # will become default soon
from tensorpack import *
from DQNModel import Model as DQNModel
from common import Evaluator, eval_model_multithread, play_n_episodes
from atari_wrapper import FrameStack, MapState, FireResetEnv
from expreplay import ExpReplay
from atari import AtariPlayer
BATCH_SIZE = 64
IMAGE_SIZE = (84, 84)
FRAME_HISTORY = 4
ACTION_REPEAT = 4 # aka FRAME_SKIP
UPDATE_FREQ = 4
GAMMA = 0.99
MEMORY_SIZE = 1e6
# will consume at least 1e6 * 84 * 84 bytes == 6.6G memory.
INIT_MEMORY_SIZE = MEMORY_SIZE // 20
STEPS_PER_EPOCH = 10000 // UPDATE_FREQ * 10 # each epoch is 100k played frames
EVAL_EPISODE = 50
NUM_ACTIONS = None
ROM_FILE = None
METHOD = None
def get_player(viz=False, train=False):
    """Build an Atari environment with DQN preprocessing applied.

    Args:
        viz: visualization option forwarded to AtariPlayer (False, or a
            frame delay for on-screen rendering).
        train: if True, configure the env for training (losing a life ends
            the episode, and frame stacking is left to the replay buffer).

    Returns:
        The wrapped environment.
    """
    player = AtariPlayer(ROM_FILE, frame_skip=ACTION_REPEAT, viz=viz,
                         live_lost_as_eoe=train, max_num_frames=30000)
    player = FireResetEnv(player)
    player = MapState(player, lambda frame: cv2.resize(frame, IMAGE_SIZE))
    if not train:
        # in training, history is taken care of in expreplay buffer
        player = FrameStack(player, FRAME_HISTORY)
    return player
class Model(DQNModel):
    """Atari Q-network: the Nature-DQN convolutional trunk on top of DQNModel.

    Supports plain DQN, Double DQN and Dueling DQN heads, selected by the
    module-level METHOD setting.
    """

    def __init__(self):
        super(Model, self).__init__(IMAGE_SIZE, FRAME_HISTORY, METHOD, NUM_ACTIONS, GAMMA)

    def _get_DQN_prediction(self, image):
        """Map a uint8 image batch in [0, 255] to per-action Q-values."""
        image = image / 255.0
        with argscope(Conv2D, nl=PReLU.symbolic_function, use_bias=True), \
                argscope(LeakyReLU, alpha=0.01):
            # Nature architecture (an alternative, slower architecture that
            # converges in fewer iterations was used for the README figure).
            features = (LinearWrap(image)
                        .Conv2D('conv0', out_channel=32, kernel_shape=8, stride=4)
                        .Conv2D('conv1', out_channel=64, kernel_shape=4, stride=2)
                        .Conv2D('conv2', out_channel=64, kernel_shape=3)
                        .FullyConnected('fc0', 512, nl=LeakyReLU)())

        if self.method == 'Dueling':
            # Dueling DQN: separate state-value and advantage streams,
            # recombined with the mean-advantage baseline.
            V = FullyConnected('fctV', features, 1, nl=tf.identity)
            As = FullyConnected('fctA', features, self.num_actions, nl=tf.identity)
            Q = tf.add(As, V - tf.reduce_mean(As, 1, keep_dims=True))
        else:
            Q = FullyConnected('fct', features, self.num_actions, nl=tf.identity)
        return tf.identity(Q, name='Qvalue')
def get_config():
    """Assemble the TrainConfig: replay data flow, model, and training schedules."""
    replay = ExpReplay(
        predictor_io_names=(['state'], ['Qvalue']),
        player=get_player(train=True),
        state_shape=IMAGE_SIZE,
        batch_size=BATCH_SIZE,
        memory_size=MEMORY_SIZE,
        init_memory_size=INIT_MEMORY_SIZE,
        init_exploration=1.0,
        update_frequency=UPDATE_FREQ,
        history_len=FRAME_HISTORY)

    callbacks = [
        ModelSaver(),
        # update target network every 10k steps
        PeriodicTrigger(
            RunOp(DQNModel.update_target_param, verbose=True),
            every_k_steps=10000 // UPDATE_FREQ),
        replay,
        ScheduledHyperParamSetter('learning_rate',
                                  [(60, 4e-4), (100, 2e-4)]),
        # anneal exploration 1 -> 0.1 over the first million steps
        ScheduledHyperParamSetter(
            ObjAttrParam(replay, 'exploration'),
            [(0, 1), (10, 0.1), (320, 0.01)],
            interp='linear'),
        PeriodicTrigger(
            Evaluator(EVAL_EPISODE, ['state'], ['Qvalue'], get_player),
            every_k_epochs=10),
        HumanHyperParamSetter('learning_rate'),
    ]

    return TrainConfig(
        data=QueueInput(replay),
        model=Model(),
        callbacks=callbacks,
        steps_per_epoch=STEPS_PER_EPOCH,
        max_epoch=1000)
# Script entry point: parse CLI arguments, configure module globals, then
# play / evaluate / train depending on --task.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
    parser.add_argument('--load', help='load model')
    parser.add_argument('--task', help='task to perform',
                        choices=['play', 'eval', 'train'], default='train')
    parser.add_argument('--rom', help='atari rom', required=True)
    parser.add_argument('--algo', help='algorithm',
                        choices=['DQN', 'Double', 'Dueling'], default='Double')
    args = parser.parse_args()

    if args.gpu:
        # Restrict visible GPUs before any TF session is created.
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    # These module-level globals must be assigned before Model() is built:
    # Model.__init__ reads METHOD and NUM_ACTIONS, get_player reads ROM_FILE.
    ROM_FILE = args.rom
    METHOD = args.algo

    # set num_actions
    NUM_ACTIONS = AtariPlayer(ROM_FILE).action_space.n
    logger.info("ROM: {}, Num Actions: {}".format(ROM_FILE, NUM_ACTIONS))

    if args.task != 'train':
        # play/eval both need a trained checkpoint and an offline predictor.
        assert args.load is not None
        pred = OfflinePredictor(PredictConfig(
            model=Model(),
            session_init=get_model_loader(args.load),
            input_names=['state'],
            output_names=['Qvalue']))
        if args.task == 'play':
            play_n_episodes(get_player(viz=0.01), pred, 100)
        elif args.task == 'eval':
            eval_model_multithread(pred, EVAL_EPISODE, get_player)
    else:
        # Train: one log dir per ROM, optionally resuming from --load.
        logger.set_logger_dir(
            os.path.join('train_log', 'DQN-{}'.format(
                os.path.basename(ROM_FILE).split('.')[0])))
        config = get_config()
        if args.load:
            config.session_init = get_model_loader(args.load)
        launch_train_with_config(config, SimpleTrainer())
| [
"ppwwyyxxc@gmail.com"
] | ppwwyyxxc@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.