blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c4b6a3f62693ae3839dab962a54b3148f679bc02 | 21c8e8fee35d736938d22bfd01d4f8aa0f81b79e | /app.py | 14a3dc6627e061c994804fbd9b2d09ae8cab479f | [
"MIT"
] | permissive | betatim/etherbrain | 320d4accdc789325b94feafab18aa6e49cea8564 | 910152032825861248cc300b0388c07112fff5db | refs/heads/master | 2021-01-16T21:22:31.676169 | 2016-02-11T17:32:28 | 2016-02-11T17:32:28 | 51,532,606 | 1 | 0 | null | 2016-02-11T17:33:01 | 2016-02-11T17:33:00 | null | UTF-8 | Python | false | false | 2,028 | py | import os
import requests
from github3 import login
from flask import (
Response,
Flask,
g,
request
)
GH_TOKEN = os.getenv("TOKEN")
FORK_ME = """<a href="https://github.com/etherpad-archive/etherbrain"><img style="position: absolute; top: 0; right: 0; border: 0;" src="https://camo.githubusercontent.com/365986a132ccd6a44c23a9169022c0b5c890c387/68747470733a2f2f73332e616d617a6f6e6177732e636f6d2f6769746875622f726962626f6e732f666f726b6d655f72696768745f7265645f6161303030302e706e67" alt="Fork me on GitHub" data-canonical-src="https://s3.amazonaws.com/github/ribbons/forkme_right_red_aa0000.png"></a>"""
app = Flask(__name__)
# NOTE(review): debug mode enables Werkzeug's interactive debugger and
# auto-reload; it must not stay enabled on a publicly deployed instance.
app.debug = True
@app.route('/moz/<path:path>/')
def moz_pad(path):
    """Archive a public Mozilla etherpad into the etherpad-archive pages repo.

    Fetches the pad's plain-text export and commits it as ``moz/<path>.md``
    in the ``etherpad-archive/etherpad-archive.github.io`` repository —
    creating the file on first archive, updating it afterwards.  Returns an
    HTML snippet linking to the archived copy.
    """
    ether_path = "https://public.etherpad-mozilla.org/p/{}".format(path)
    # Plain-text export of the pad.  No error handling: assumes the HTTP
    # request succeeds — TODO confirm behavior for a nonexistent pad.
    req = requests.get(ether_path + "/export/txt")
    gh = login('etherbrain', token=GH_TOKEN)
    r = gh.repository('etherpad-archive', 'etherpad-archive.github.io')
    # Listing of the 'moz' directory; presumably a filename-keyed mapping
    # (github3 behavior) — verify against the github3 version in use.
    contents = r.contents(path='moz')
    print(contents)
    fname = path + ".md"
    if contents is None or fname not in contents:
        # create it for the first time
        r.create_file("moz/{}.md".format(path),
            'etherpad from {}'.format(ether_path),
            content=req.content)
    else:
        # update the file
        f = contents[fname]
        f.update('updated etherpad from {}'.format(ether_path),
            content=req.content)
    return Response(
        'Check out: <a href="http://etherpad-archive.github.io/moz/{path}.md"'
        '>http://etherpad-archive.github.io/moz/{path}.md</a>'.format(path=path)
    )
@app.route('/')
def index():
    """Landing page: usage instructions plus the fork-me GitHub ribbon."""
    page = (
        "<html><head><title>Etherpad brain</title></head><body><h1>Hello I am the etherpad brain</h1>"
        "<p>To archive https://public.etherpad-mozilla.org/p/XXX visit"
        " https://etherbrain.herokuapp.com/moz/XXX/</p>{}</body></html>"
    ).format(FORK_ME)
    return Response(page)
if __name__ == "__main__":
    # Local development entry point (Werkzeug dev server, debugger enabled).
    app.run(debug=True)
| [
"betatim@gmail.com"
] | betatim@gmail.com |
ecc34043983b03b3988be9dcd00276282e219b79 | 41523dd4871e8ed1043d2b3ddf73417fcbdde209 | /day10/第三方模块.py | 160b4dccef65869bcc8a653e7c6817dce1eb9e80 | [] | no_license | WayneChen1994/Python1805 | 2aa1c611f8902b8373b8c9a4e06354c25f8826d6 | a168cd3b7749afc326ec4326db413378fd3677d5 | refs/heads/master | 2020-03-30T23:19:00.773288 | 2018-11-02T10:47:40 | 2018-11-02T10:47:40 | 151,697,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 640 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: Wayne.Chen
'''
第三方的库都需要安装
第一种:使用pip安装
命令格式:pip install 库名
若发生错误,先更新错误,再切换网络
第二种:使用pycharm进行安装
'''
from PIL import Image

# Load the source picture into a PIL Image object.
img = Image.open('ppp.jpg')
# Report the decoder-detected format and the (width, height) in pixels.
print(img.format, img.size)
# Shrink the image in place so it fits inside 500x200, keeping aspect ratio.
img.thumbnail((500, 200))
# Save the resized copy; the second argument forces the JPEG encoder.
img.save('pppp.jpg', 'JPEG')
| [
"waynechen1994@163.com"
] | waynechen1994@163.com |
692ffcf5c4f607be9f55703706c8341c7ac328f9 | 16640092d62417c32677ee2f7c63a913c11de51a | /test.py | ba098b066ee0dc2ba43422cf0389de84cad9cdf9 | [] | no_license | reakain/rob538hw2 | 2cacd5ea0c394d262420093f31c50acd029322ff | 07249df900380353020be57ce8a4eebed904e904 | refs/heads/main | 2022-12-25T17:11:08.513206 | 2020-10-13T03:10:24 | 2020-10-13T03:10:24 | 303,574,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,769 | py | # from https://amunategui.github.io/reinforcement-learning/index.html
import numpy as np
import pylab as plt
# map cell to cell, add circular cell to goal point
# Edges of the undirected state graph; each tuple is a traversable pair.
points_list = [(0,1), (1,5), (5,6), (5,4), (1,2), (2,3), (2,7)]
goal = 7
import networkx as nx
G=nx.Graph()
G.add_edges_from(points_list)
# Visualize the graph so the reachable paths to the goal are easy to see.
pos = nx.spring_layout(G)
nx.draw_networkx_nodes(G,pos)
nx.draw_networkx_edges(G,pos)
nx.draw_networkx_labels(G,pos)
plt.show()
# how many points in graph? x points
MATRIX_SIZE = 8
# create matrix x*y
# R is the reward matrix: -1 = no edge, 0 = traversable edge,
# 100 = edge that enters the goal state.
R = np.matrix(np.ones(shape=(MATRIX_SIZE, MATRIX_SIZE)))
R *= -1
# assign zeros to paths and 100 to goal-reaching point
for point in points_list:
    print(point)
    if point[1] == goal:
        R[point] = 100
    else:
        R[point] = 0
    if point[0] == goal:
        R[point[::-1]] = 100
    else:
        # reverse of point
        R[point[::-1]]= 0
# add goal point round trip
R[goal,goal]= 100
# Q accumulates the learned state-action values, initially all zero.
Q = np.matrix(np.zeros([MATRIX_SIZE,MATRIX_SIZE]))
# learning parameter
gamma = 0.8
initial_state = 1
def available_actions(state):
    """Return the indices of all actions allowed from *state*.

    An action is allowed when its reward in the module-level matrix ``R``
    is non-negative (i.e. an edge exists in the graph).
    """
    row = R[state,]
    return np.where(row >= 0)[1]
available_act = available_actions(initial_state)  # allowed moves out of state 1
def sample_next_action(available_actions_range):
    """Pick one action uniformly at random from the given allowed actions.

    Fix: the original ignored its ``available_actions_range`` parameter and
    silently read the module-level ``available_act`` global instead, so
    callers could not actually choose which action set to sample from.
    """
    next_action = int(np.random.choice(available_actions_range, 1))
    return next_action
action = sample_next_action(available_act)  # random first action from state 1
def update(current_state, action, gamma):
    """One Q-learning update for (current_state, action); returns a progress score.

    Mutates the module-level ``Q`` in place using the Bellman update
    Q[s, a] = R[s, a] + gamma * max(Q[a, :]), breaking ties for the maximal
    next action uniformly at random.  The returned value is the sum of the
    normalized Q matrix (a convergence indicator), or 0 before any positive
    reward has been propagated.
    """
    # Index (or indices) of the best action out of the landing state.
    max_index = np.where(Q[action,] == np.max(Q[action,]))[1]
    if max_index.shape[0] > 1:
        # Several equally good actions: pick one at random.
        max_index = int(np.random.choice(max_index, size = 1))
    else:
        max_index = int(max_index)
    max_value = Q[action, max_index]
    # Bellman update of the global Q matrix.
    Q[current_state, action] = R[current_state, action] + gamma * max_value
    print('max_value', R[current_state, action] + gamma * max_value)
    if (np.max(Q) > 0):
        return(np.sum(Q/np.max(Q)*100))
    else:
        return (0)
update(initial_state, action, gamma)
# Training
# Run 700 episodes of single-step Q-learning from random start states.
scores = []
for i in range(700):
    current_state = np.random.randint(0, int(Q.shape[0]))
    available_act = available_actions(current_state)
    action = sample_next_action(available_act)
    score = update(current_state,action,gamma)
    scores.append(score)
    print ('Score:', str(score))
print("Trained Q matrix:")
print(Q/np.max(Q)*100)
# Testing
# Greedily follow the learned Q values from state 0 until the goal (7).
current_state = 0
steps = [current_state]
while current_state != 7:
    next_step_index = np.where(Q[current_state,] == np.max(Q[current_state,]))[1]
    if next_step_index.shape[0] > 1:
        # Ties in Q: choose among the equally good successors at random.
        next_step_index = int(np.random.choice(next_step_index, size = 1))
    else:
        next_step_index = int(next_step_index)
    steps.append(next_step_index)
    current_state = next_step_index
print("Most efficient path:")
print(steps)
plt.plot(scores)
plt.show() | [
"reakain@users.noreply.github.com"
] | reakain@users.noreply.github.com |
f8c13f56bef005c37b573d17ed303454226ba230 | 7136e5242793b620fa12e9bd15bf4d8aeb0bfe7a | /examples/adspygoogle/dfp/v201101/get_lica.py | 121e4466b8d92878321c9c97d2374a5552ef0e28 | [
"Apache-2.0"
] | permissive | hockeyprincess/google-api-dfp-python | 534519695ffd26341204eedda7a8b50648f12ea9 | efa82a8d85cbdc90f030db9d168790c55bd8b12a | refs/heads/master | 2021-01-10T10:01:09.445419 | 2011-04-14T18:25:38 | 2011-04-14T18:25:38 | 52,676,942 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,958 | py | #!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets a line item creative association (LICA) by the line
item and creative id. To determine which line items exist, run
get_all_line_items.py. To determine which creatives exist, run
get_all_creatives.py."""
__author__ = 'api.sgrinberg@gmail.com (Stan Grinberg)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.append(os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle.dfp.DfpClient import DfpClient
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service. By default, the request is always made against
# the sandbox environment.
lica_service = client.GetLineItemCreativeAssociationService(
    'https://sandbox.google.com', 'v201101')
# Set line item and creative id to use to retrieve the LICA.
# NOTE: these placeholders must be replaced with real ids before running.
line_item_id = 'INSERT_LINE_ITEM_ID_HERE'
creative_id = 'INSERT_CREATIVE_ID_HERE'
# Get LICA.  The service call returns a list; [0] takes the first match.
lica = lica_service.GetLineItemCreativeAssociation(line_item_id, creative_id)[0]
# Display results.
print ('LICA with line item id \'%s\', creative id \'%s\', and status '
       '\'%s\' was found.' % (lica['lineItemId'], lica['creativeId'],
                              lica['status']))
| [
"api.sgrinberg@7990c6e4-1bfd-11df-85e6-9b4bd7dd5138"
] | api.sgrinberg@7990c6e4-1bfd-11df-85e6-9b4bd7dd5138 |
048ac96b3f749f1c876f5b098fc2d9c763c14bfd | c418bd9d730bc17653611da7f0642bdd25cba65f | /djangosite/myapp/models.py | 24937ec6ecd5ae57cc064366c022229598fdac16 | [] | no_license | ErDeepakSingh/Ajax-State-City | ae18a4f4b8ef8e90932d8aed74553897d7ac9b3b | 72a31424bd9402ef2c76198ee80934ac399fccf9 | refs/heads/master | 2020-08-16T02:29:36.081445 | 2019-10-16T02:51:22 | 2019-10-16T02:51:22 | 215,443,208 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | from django.db import models
# Create your models here.
class Student(models.Model):
    """A registered student, uniquely identified by e-mail address."""
    name = models.CharField(max_length=25)
    email = models.EmailField(unique=True)  # natural key; uniqueness enforced at the DB level
    joined = models.DateTimeField(auto_now_add=True)  # stamped once, on first save
    phone = models.CharField(max_length=10, default='')
    # NOTE(review): the password is stored as plain text in a 16-char field;
    # Django's auth/hashing machinery should be used instead of a raw CharField.
    password = models.CharField(max_length=16, default='')
    def __str__(self):
        # Human-readable identity shown in the admin and in logs.
        return self.email + " - " + self.phone
| [
"deepakthakur755@gmail.com"
] | deepakthakur755@gmail.com |
82dac2f11d268d0f7a2d30e10b1a6ca670013859 | 9c4e02ba5201794a4c5cbff548db1be7c87409c1 | /venv/lib/python3.9/site-packages/pygments/lexers/trafficscript.py | 67ecd243cb3c15db119f07cd6007ebb986f19d42 | [
"MIT",
"Apache-2.0"
] | permissive | ClassWizard/PodLockParser | 4faf4679d404158b3cf2b1ceb4faabca461b0008 | 84f6d3fced521849657d21ae4cb9681f5897b957 | refs/heads/master | 2022-12-23T20:39:48.096729 | 2022-02-08T09:49:01 | 2022-02-08T09:49:01 | 167,668,617 | 2 | 1 | MIT | 2022-12-14T10:01:41 | 2019-01-26T08:50:35 | Python | UTF-8 | Python | false | false | 1,512 | py | """
pygments.lexers.trafficscript
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexer for RiverBed's TrafficScript (RTS) language.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer
from pygments.token import String, Number, Name, Keyword, Operator, Text, Comment
__all__ = ['RtsLexer']
class RtsLexer(RegexLexer):
    """
    For `Riverbed Stingray Traffic Manager <http://www.riverbed.com/stingray>`_

    .. versionadded:: 2.1
    """
    name = 'TrafficScript'
    aliases = ['trafficscript', 'rts']
    filenames = ['*.rts']
    # Rule order matters: earlier patterns win, so multi-character operators
    # are listed before (and matched instead of) their single-char prefixes.
    tokens = {
        'root' : [
            # Single-quoted string: backslash escapes, no interpolation.
            (r"'(\\\\|\\[^\\]|[^'\\])*'", String),
            # Double quote opens an escapable (interpolating) string state.
            (r'"', String, 'escapable-string'),
            (r'(0x[0-9a-fA-F]+|\d+)', Number),
            (r'\d+\.\d+', Number.Float),
            # TrafficScript variables are $-prefixed.
            (r'\$[a-zA-Z](\w|_)*', Name.Variable),
            (r'(if|else|for(each)?|in|while|do|break|sub|return|import)', Keyword),
            (r'[a-zA-Z][\w.]*', Name.Function),
            # Single-character punctuation and operators.
            (r'[-+*/%=,;(){}<>^.!~|&\[\]\?\:]', Operator),
            # Multi-character operators (comparison, logic, compound assign).
            (r'(>=|<=|==|!=|'
             r'&&|\|\||'
             r'\+=|.=|-=|\*=|/=|%=|<<=|>>=|&=|\|=|\^=|'
             r'>>|<<|'
             r'\+\+|--|=>)', Operator),
            (r'[ \t\r]+', Text),
            # Line comments run to end of line.
            (r'#[^\n]*', Comment),
        ],
        'escapable-string' : [
            (r'\\[tsn]', String.Escape),
            (r'[^"]', String),
            # Closing quote pops back to 'root'.
            (r'"', String, '#pop'),
        ],
    }
| [
"chenlongwei@camera360.com"
] | chenlongwei@camera360.com |
ff9be30ad7a3fb60856edfe43f45d57c5a03eb04 | b9a2097b1ff526f0f980cb44f321ecdecc071baf | /backend/nwh_elkhart_metrics_26614/urls.py | 8e7b3ab44b091f930ad0b1b58e0f93406437830a | [] | no_license | crowdbotics-apps/nwh-elkhart-metrics-26614 | ce08c984d6c939b7f7cd5158b5c39fe37be94dcc | e86088482281f83fe789ce0b492e76981df1c08c | refs/heads/master | 2023-05-01T08:17:44.464562 | 2021-05-12T18:42:43 | 2021-05-12T18:42:43 | 366,794,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,580 | py | """nwh_elkhart_metrics_26614 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
# URL table: app routes first, then admin/auth, then API docs, with the SPA
# catch-all appended last so it never shadows a real route.
urlpatterns = [
    path("", include("home.urls")),
    path("accounts/", include("allauth.urls")),
    path("modules/", include("modules.urls")),
    path("api/v1/", include("home.api.v1.urls")),
    path("admin/", admin.site.urls),
    path("users/", include("users.urls", namespace="users")),
    path("rest-auth/", include("rest_auth.urls")),
    # Override email confirm to use allauth's HTML view instead of rest_auth's API view
    path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
    path("rest-auth/registration/", include("rest_auth.registration.urls")),
    path("api/v1/", include("chat.api.v1.urls")),
    path("chat/", include("chat.urls")),
    path("api/v1/", include("chat_user_profile.api.v1.urls")),
    path("chat_user_profile/", include("chat_user_profile.urls")),
    # NOTE(review): "home.urls" is included both at "" and at "home/".
    path("home/", include("home.urls")),
    path("api/v1/", include("users.api.v1.urls")),
]
# Branding for the Django admin site.
admin.site.site_header = "NWH Elkhart Metrics"
admin.site.site_title = "NWH Elkhart Metrics Admin Portal"
admin.site.index_title = "NWH Elkhart Metrics Admin"
# swagger
api_info = openapi.Info(
    title="NWH Elkhart Metrics API",
    default_version="v1",
    description="API documentation for NWH Elkhart Metrics App",
)
# Swagger UI is restricted to authenticated users.
schema_view = get_schema_view(
    api_info,
    public=True,
    permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
    path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
# Serve the single-page app shell at the root and for any unmatched path.
urlpatterns += [path("", TemplateView.as_view(template_name="index.html"))]
urlpatterns += [
    re_path(r"^(?:.*)/?$", TemplateView.as_view(template_name="index.html"))
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
94cf66b41704c274309334be3fe3b838b8a69b17 | 304a2c58a1bd0713b876d093a39a21f3fc7bd3d1 | /skimage/morphology/greyreconstruct.py | 9e447800d30f9549466eb9c8f628a5c0124ea194 | [
"BSD-3-Clause"
] | permissive | ludwigschwardt/scikits-image | 72042e548aa9004d94dbb3da518134be28ba0f4b | 571151958f94842c642f0a17b73968757326e672 | refs/heads/master | 2023-09-05T02:28:15.470227 | 2012-08-27T10:16:40 | 2012-08-27T10:16:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,158 | py | """
This morphological reconstruction routine was adapted from CellProfiler, code
licensed under both GPL and BSD licenses.
Website: http://www.cellprofiler.org
Copyright (c) 2003-2009 Massachusetts Institute of Technology
Copyright (c) 2009-2011 Broad Institute
All rights reserved.
Original author: Lee Kamentsky
"""
import numpy as np
from skimage.filter._rank_order import rank_order
def reconstruction(seed, mask, method='dilation', selem=None, offset=None):
    """Perform a morphological reconstruction of an image.

    Morphological reconstruction by dilation is similar to basic morphological
    dilation: high-intensity values will replace nearby low-intensity values.
    The basic dilation operator, however, uses a structuring element to
    determine how far a value in the input image can spread. In contrast,
    reconstruction uses two images: a "seed" image, which specifies the values
    that spread, and a "mask" image, which gives the maximum allowed value at
    each pixel. The mask image, like the structuring element, limits the spread
    of high-intensity values. Reconstruction by erosion is simply the inverse:
    low-intensity values spread from the seed image and are limited by the mask
    image, which represents the minimum allowed value.

    Alternatively, you can think of reconstruction as a way to isolate the
    connected regions of an image. For dilation, reconstruction connects
    regions marked by local maxima in the seed image: neighboring pixels
    less-than-or-equal-to those seeds are connected to the seeded region.
    Local maxima with values larger than the seed image will get truncated to
    the seed value.

    Parameters
    ----------
    seed : ndarray
        The seed image (a.k.a. marker image), which specifies the values that
        are dilated or eroded.
    mask : ndarray
        The maximum (dilation) / minimum (erosion) allowed value at each pixel.
    method : {'dilation'|'erosion'}
        Perform reconstruction by dilation or erosion. In dilation (or
        erosion), the seed image is dilated (or eroded) until limited by the
        mask image. For dilation, each seed value must be less than or equal
        to the corresponding mask value; for erosion, the reverse is true.
    selem : ndarray
        The neighborhood expressed as a 2-D array of 1's and 0's.
    offset : ndarray, optional
        The coordinates of the center of the structuring element. Defaults to
        the geometric center, in which case all dimensions of `selem` must be
        odd.

    Returns
    -------
    reconstructed : ndarray
        The result of morphological reconstruction.

    Raises
    ------
    ValueError
        If the seed/mask ordering constraint for the chosen method is
        violated, if `method` is not 'dilation' or 'erosion', or if a default
        offset is requested for an even-sized structuring element.

    Notes
    -----
    The algorithm is taken from:
    [1] Robinson, "Efficient morphological reconstruction: a downhill filter",
    Pattern Recognition Letters 25 (2004) 1759-1767.

    Applications for greyscale reconstruction are discussed in:
    [2] Vincent, L., "Morphological Grayscale Reconstruction in Image Analysis:
    Applications and Efficient Algorithms", IEEE Transactions on Image
    Processing (1993)
    [3] Soille, P., "Morphological Image Analysis: Principles and Applications",
    Chapter 6, 2nd edition (2003), ISBN 3540429883.
    """
    assert tuple(seed.shape) == tuple(mask.shape)
    # Validate the seed/mask ordering required by the chosen method, and
    # reject unknown methods up front (the original silently fell through
    # and later crashed with an unrelated NameError).
    if method == 'dilation':
        if np.any(seed > mask):
            raise ValueError("Intensity of seed image must be less than that "
                             "of the mask image for reconstruction by dilation.")
    elif method == 'erosion':
        if np.any(seed < mask):
            raise ValueError("Intensity of seed image must be greater than that "
                             "of the mask image for reconstruction by erosion.")
    else:
        raise ValueError("Reconstruction method must be "
                         "'dilation' or 'erosion'.")
    try:
        from ._greyreconstruct import reconstruction_loop
    except ImportError:
        raise ImportError("_greyreconstruct extension not available.")

    if selem is None:
        selem = np.ones([3] * seed.ndim, dtype=bool)
    else:
        selem = selem.copy()

    if offset is None:
        if not all(d % 2 == 1 for d in selem.shape):
            # Fix: the original constructed this ValueError without raising
            # it, so even-sized footprints slipped through unchecked.
            raise ValueError("Footprint dimensions must all be odd")
        offset = np.array([d // 2 for d in selem.shape])
    # Cross out the center of the selem
    selem[tuple(slice(d, d + 1) for d in offset)] = False

    # Make padding for edges of reconstructed image so we can ignore boundaries
    # (// keeps integer dtype; '/' would yield floats under Python 3).
    padding = (np.array(selem.shape) // 2).astype(int)
    dims = np.zeros(seed.ndim + 1, dtype=int)
    dims[1:] = np.array(seed.shape) + 2 * padding
    dims[0] = 2
    # Tuple indices: modern NumPy no longer accepts a list of slices as a
    # multidimensional index.
    inside_slices = tuple(slice(p, -p) for p in padding)
    # Set padded region to minimum image intensity and mask along first axis so
    # we can interleave image and mask pixels when sorting.
    if method == 'dilation':
        pad_value = np.min(seed)
    elif method == 'erosion':
        pad_value = np.max(seed)
    images = np.ones(dims) * pad_value
    images[(0,) + inside_slices] = seed
    images[(1,) + inside_slices] = mask

    # Create a list of strides across the array to get the neighbors within
    # a flattened array.  Floor division keeps these integral so they remain
    # valid slice bounds/indices on Python 3.
    value_stride = np.array(images.strides[1:]) // images.dtype.itemsize
    image_stride = images.strides[0] // images.dtype.itemsize
    selem_mgrid = np.mgrid[tuple(slice(-o, d - o)
                                 for d, o in zip(selem.shape, offset))]
    selem_offsets = selem_mgrid[:, selem].transpose()
    nb_strides = np.array([np.sum(value_stride * selem_offset)
                           for selem_offset in selem_offsets], np.int32)

    images = images.flatten()

    # Erosion goes smallest to largest; dilation goes largest to smallest.
    index_sorted = np.argsort(images).astype(np.int32)
    if method == 'dilation':
        index_sorted = index_sorted[::-1]

    # Make a linked list of pixels sorted by value. -1 is the list terminator.
    # ('next_' avoids shadowing the builtin next().)
    prev = -np.ones(len(images), np.int32)
    next_ = -np.ones(len(images), np.int32)
    prev[index_sorted[1:]] = index_sorted[:-1]
    next_[index_sorted[:-1]] = index_sorted[1:]

    # Cython inner-loop compares the rank of pixel values.
    if method == 'dilation':
        value_rank, value_map = rank_order(images)
    elif method == 'erosion':
        value_rank, value_map = rank_order(-images)
        value_map = -value_map

    start = index_sorted[0]
    reconstruction_loop(value_rank, prev, next_, nb_strides, start,
                        image_stride)

    # Reshape reconstructed image to original image shape and remove padding.
    rec_img = value_map[value_rank[:image_stride]]
    rec_img.shape = np.array(seed.shape) + 2 * padding
    return rec_img[inside_slices]
| [
"tsyu80@gmail.com"
] | tsyu80@gmail.com |
97baeed7c56a0db84cf7856d975f4a404250a4bf | f321c54e5745a21e41842c1cdccaefa5256d918c | /magnetos/utils/string_utils.py | e285d77142c2a32673e542aecca927e326123fca | [
"MIT"
] | permissive | gitshaozhong/magnetos | 5104f90782ac03a2b0b5c86b7333d72b1c27338c | f48dcd7450a46d619dcbe64c11c9aa1c119cd307 | refs/heads/master | 2022-01-05T10:06:04.193597 | 2019-07-12T00:54:05 | 2019-07-12T00:54:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | # -*- coding: utf-8 -*-
# Created by restran on 2017/9/26
from __future__ import unicode_literals, absolute_import
def fixed_length_split(data, width):
    """Split *data* into consecutive chunks of at most *width* characters.

    The final chunk may be shorter when ``len(data)`` is not a multiple of
    ``width``.  (A regex such as ``re.findall(r'.{%s}' % width, data)`` would
    silently drop that trailing remainder, so slicing is used instead.)

    :param data: the string to split
    :param width: chunk length, must be a positive integer
    :return: list of chunk strings, in order
    """
    chunks = []
    for start in range(0, len(data), width):
        chunks.append(data[start:start + width])
    return chunks
| [
"grestran@gmail.com"
] | grestran@gmail.com |
1b5ec767df5eb39a49ccdf40dca40eea62760f90 | 0fa82ccc0b93944c4cbb8255834b019cf16d128d | /2020/TopNBuzzWords.py | ace88f6781b44057f221aa1f992be9d1b7504886 | [] | no_license | Akashdeepsingh1/project | 6ad477088a3cae2d7eea818a7bd50a2495ce3ba8 | bdebc6271b39d7260f6ab5bca37ab4036400258f | refs/heads/master | 2022-12-13T23:09:35.782820 | 2020-08-27T14:22:37 | 2020-08-27T14:22:37 | 279,722,741 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,113 | py | '''
You work on a team whose job is to understand the most sought after toys for the holiday season. A teammate of yours has built a webcrawler that extracts a list of quotes about toys from different articles. You need to take these quotes and identify which toys are mentioned most frequently. Write an algorithm that identifies the top N toys out of a list of quotes and list of toys.
Your algorithm should output the top N toys mentioned most frequently in the quotes.
Input:
The input to the function/method consists of five arguments:
numToys, an integer representing the number of toys
topToys, an integer representing the number of top toys your algorithm needs to return;
toys, a list of strings representing the toys,
numQuotes, an integer representing the number of quotes about toys;
quotes, a list of strings that consist of space-separated words representing articles about toys
Output:
Return a list of strings of the most popular N toys in order of most to least frequently mentioned
Note:
The comparison of strings is case-insensitive. If the value of topToys is more than the number of toys, return the names of only the toys mentioned in the quotes. If toys are mentioned an equal number of times in quotes, sort alphabetically.
Example 1:
Input:
numToys = 6
topToys = 2
toys = ["elmo", "elsa", "legos", "drone", "tablet", "warcraft"]
numQuotes = 6
quotes = [
"Elmo is the hottest of the season! Elmo will be on every kid's wishlist!",
"The new Elmo dolls are super high quality",
"Expect the Elsa dolls to be very popular this year, Elsa!",
"Elsa and Elmo are the toys I'll be buying for my kids, Elsa is good",
"For parents of older kids, look into buying them a drone",
"Warcraft is slowly rising in popularity ahead of the holiday season"
];
Output:
["elmo", "elsa"]
'''
def solution(quotes, numToys, topToys, toys):
    """Return the ``topToys`` most-mentioned toy names, most frequent first.

    Every case-insensitive occurrence of a toy name in ``quotes`` counts once.
    Ties are broken alphabetically, as the problem statement requires — the
    original heap of ``(count, word)`` tuples broke ties in reverse-
    alphabetical order via ``nlargest``, which is a bug.  If fewer than
    ``topToys`` toys are mentioned, only the mentioned ones are returned.
    ``numToys`` is accepted for interface compatibility but is redundant
    with ``len(toys)``.
    """
    import re
    from collections import Counter

    toy_names = set(toys)  # O(1) membership tests instead of list scans
    counts = Counter()
    for line in quotes:
        # Strip sentence punctuation/quotes, lowercase, then split on spaces.
        for word in re.sub(r'''[,!.;'"]+''', " ", line).lower().split():
            if word in toy_names:
                counts[word] += 1
    # Descending count, then ascending name for deterministic tie-breaks.
    ranked = sorted(counts.items(), key=lambda item: (-item[1], item[0]))
    return [name for name, _ in ranked[:topToys]]
# Fixture from the worked example in the module docstring.
numToys = 6
topToys = 2
toys = ["elmo", "elsa", "legos", "drone", "tablet", "warcraft"]
numQuotes = 6
quotes = [
"Elmo is the hottest of the season! Elmo will be on every kid's wishlist!",
"The new Elmo dolls are super high quality",
"Expect the Elsa dolls to be very popular this year, Elsa!",
"Elsa and Elmo are the toys I'll be buying for my kids, Elsa is good",
"For parents of older kids, look into buying them a drone",
"Warcraft is slowly rising in popularity ahead of the holiday season"
]
print (solution (quotes, numToys, topToys, toys)) | [
"Akashdeep_S@Dell.com"
] | Akashdeep_S@Dell.com |
631d16757e7af9733f9944dc7b6eabb43ed2f47e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02691/s319336614.py | 171ddfd157899a75a6f611fe7687282c54e7f3f4 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | import sys
from functools import lru_cache
from collections import defaultdict
inf = float('inf')
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
sys.setrecursionlimit(10**6)
def input(): return sys.stdin.readline().rstrip()
def read():
return int(readline())
def reads():
return map(int, readline().split())
x=read()
a=list(reads())
dic=[]
dic2=defaultdict(int)
for i in range(x):
dic.append(i+a[i])
dic2[i-a[i]]+=1
ans=0
#print(dic,dic2)
for i in dic:
ans+=dic2[i]
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
49ac678e7399622d7dbfa5d8fa346354eeae6c27 | c73fc798764f40ea6fa466a573fb01223e367ce3 | /sorting_algo/bubble_sort13_08_3.py | ec228d9d7c593673db252adc5d40e3d6bc431de2 | [] | no_license | mohitsh/python_work | b1385f62104aa6b932f5452ca5c2421526345455 | 223a802dea5cdb73f44a159856c7432983655668 | refs/heads/master | 2020-04-24T00:34:15.427060 | 2018-08-21T19:12:07 | 2018-08-21T19:12:07 | 37,491,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 280 | py |
def bubble_sort(alist):
    """Sort `alist` in place in ascending order and return it (bubble sort).

    Each pass bubbles the largest remaining element to the end of the
    unsorted region.  The `swapped` flag ends early when a pass makes no
    exchanges, so an already-sorted list costs a single O(n) pass instead
    of the original's unconditional O(n^2).
    """
    for passnum in range(len(alist) - 1, 0, -1):
        swapped = False
        for i in range(passnum):
            if alist[i] > alist[i + 1]:
                # Tuple assignment swaps without a temporary variable.
                alist[i], alist[i + 1] = alist[i + 1], alist[i]
                swapped = True
        if not swapped:
            # No swaps this pass: the list is already sorted.
            break
    return alist
# Demo (Python 2 print statements): show the list before and after sorting.
alist = [9,8,7,6,5,4,3,2,1,0]
print alist
print bubble_sort(alist)
| [
"mohitsh114@gmail.com"
] | mohitsh114@gmail.com |
5fe4808fdc7b9720b6dae56130107ff7859b3d6b | 6ceea2578be0cbc1543be3649d0ad01dd55072aa | /src/fipy/solvers/trilinos/preconditioners/jacobiPreconditioner.py | 3e0dbb4faff0fe48dc559db8e9f7fb07afb4f0e3 | [
"LicenseRef-scancode-public-domain"
] | permissive | regmi/fipy | 57972add2cc8e6c04fda09ff2faca9a2c45ad19d | eb4aacf5a8e35cdb0e41beb0d79a93e7c8aacbad | refs/heads/master | 2020-04-27T13:51:45.095692 | 2010-04-09T07:32:42 | 2010-04-09T07:32:42 | 602,099 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,916 | py | #!/usr/bin/env python
##
# -*-Pyth-*-
# ###################################################################
# FiPy - Python-based finite volume PDE solver
#
# FILE: "jacobiPreconditioner.py"
#
# Author: Jonathan Guyer <guyer@nist.gov>
# Author: Daniel Wheeler <daniel.wheeler@nist.gov>
# Author: James Warren <jwarren@nist.gov>
# Author: Maxsim Gibiansky <maxsim.gibiansky@nist.gov>
# mail: NIST
# www: http://www.ctcms.nist.gov/fipy/
#
# ========================================================================
# This software was developed at the National Institute of Standards
# and Technology by employees of the Federal Government in the course
# of their official duties. Pursuant to title 17 Section 105 of the
# United States Code this software is not subject to copyright
# protection and is in the public domain. FiPy is an experimental
# system. NIST assumes no responsibility whatsoever for its use by
# other parties, and makes no guarantees, expressed or implied, about
# its quality, reliability, or any other characteristic. We would
# appreciate acknowledgement if the software is used.
#
# This software can be redistributed and/or modified freely
# provided that any derivative works bear some notice that they are
# derived from it, and any modified versions bear some notice that
# they have been modified.
# ========================================================================
#
# ###################################################################
##
__docformat__ = 'restructuredtext'
from PyTrilinos import AztecOO
from fipy.solvers.trilinos.preconditioners.preconditioner import Preconditioner
class JacobiPreconditioner(Preconditioner):
    """
    Jacobi Preconditioner for Trilinos solvers.

    Selects AztecOO's built-in point-Jacobi preconditioner via a solver
    option rather than constructing an explicit preconditioner object.
    """
    def _applyToSolver(self, solver, matrix):
        """Enable Jacobi preconditioning on *solver*.

        ``matrix`` is accepted for interface compatibility with other
        preconditioners but is not referenced here.
        """
        solver.SetAztecOption(AztecOO.AZ_precond, AztecOO.AZ_Jacobi)
| [
"regmisk@gmail.com"
] | regmisk@gmail.com |
2ef23df228ff2888553798a422f860c67c12f531 | 9b4de05054f37a65dce49857fb6a809a370b23ca | /gd/migrations/0015_auto_20171223_1531.py | 689f60c8c2b35e98aa6e6472d8595be6fc32c8c3 | [] | no_license | susahe/gis | f6b03b8f23abf7ca22c0069a4cdf603bfe879808 | 6b8d433cd5f672994ac138c1b656136425d0c345 | refs/heads/master | 2021-05-12T01:50:12.862559 | 2018-01-27T02:25:31 | 2018-01-27T02:25:31 | 117,569,888 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,613 | py | # Generated by Django 2.0 on 2017-12-23 15:31
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 2.0) field alterations for the ``person`` model."""
    dependencies = [
        ('gd', '0014_auto_20171222_2045'),
    ]
    operations = [
        # NOTE(review): a DateField default given as the string '1/1/1977'
        # relies on Django parsing it — confirm against the model definition.
        migrations.AlterField(
            model_name='person',
            name='p_birthdate',
            field=models.DateField(default='1/1/1977', verbose_name='උපන්දිනය'),
        ),
        # NOTE(review): the choices list below uses the key 'SD' twice
        # (for two different labels); only one value can round-trip and
        # validation/display of the other is ambiguous.
        migrations.AlterField(
            model_name='person',
            name='p_donation',
            field=models.CharField(choices=[('SD', 'සමෘද්ධි සහනාධාරය'), ('PD', 'මහජන ආධාර'), ('DD', 'රෝගාධාර'), ('SD', 'ශිෂ්\u200dයාධාර'), ('ED', 'වැඩිහිටි ආධාර')], default='SD', max_length=20, verbose_name='රජයෙන් ලබන ආධාර'),
        ),
        migrations.AlterField(
            model_name='person',
            name='p_edu',
            field=models.CharField(choices=[('PS', 'පාසල් යාමට පෙර'), ('PR', 'පෙර පාසැල්'), ('OF', '1-5 ශ්\u200dරේණිය දක්වා'), ('FO', '5 සිට සා/පෙළ දක්වා'), ('OP', 'සාමන්\u200dය පෙළ සමත්'), ('UA', 'උසස් පෙළ දක්වා'), ('AP', 'උසස් පෙළ සමත්'), ('DG', 'උපාධි හා ඊට ඉහල'), ('NS', 'කිසිදා පසැල් නොගිය')], default='OP', max_length=10, verbose_name='අධ්\u200dයාපන සුදුසුකම්'),
        ),
    ]
| [
"sumudu.susahe@gmail.com"
] | sumudu.susahe@gmail.com |
16510b4dbee33035e4fd00ce92137fff7639b46b | 9c74814f9bf90529d5ccd7a1dcebe062235ca67c | /third_party/saltedge/test/test_oauth_reconnect_request_body.py | 1c76997806170372a731a2fa2c93794dbc1cbd38 | [
"MIT"
] | permissive | ltowarek/budget-supervisor | 63196fe7cef78f0f54a25891d65870745cc7cf49 | 618e01e15a7a76ed870dafccda399720a02b068b | refs/heads/master | 2021-12-15T06:41:37.531689 | 2021-11-08T19:51:58 | 2021-11-08T19:53:03 | 26,971,315 | 1 | 0 | MIT | 2021-07-04T10:56:24 | 2014-11-21T17:56:36 | Python | UTF-8 | Python | false | false | 1,000 | py | # coding: utf-8
"""
Salt Edge Account Information API
API Reference for services # noqa: E501
OpenAPI spec version: 5.0.0
Contact: support@saltedge.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.oauth_reconnect_request_body import OauthReconnectRequestBody # noqa: E501
from swagger_client.rest import ApiException
class TestOauthReconnectRequestBody(unittest.TestCase):
    """OauthReconnectRequestBody unit test stubs"""
    # Skeleton generated by swagger-codegen; the fixtures are intentionally
    # empty until the model test below is fleshed out.
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testOauthReconnectRequestBody(self):
        """Test OauthReconnectRequestBody"""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.oauth_reconnect_request_body.OauthReconnectRequestBody()  # noqa: E501
        pass
# Allow running this test module directly with the stdlib runner.
if __name__ == '__main__':
    unittest.main()
| [
"lukasz.towarek@gmail.com"
] | lukasz.towarek@gmail.com |
2439b6370f69f389a08685af7bde72b0f33ded1f | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/ac7e62ec08d10df30b76ffd035b8d449a1a097d9-<target_login>-bug.py | 51ff00075ca0161b03de61784815c1b6a53ae43d | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 816 | py | def target_login(module, target):
node_auth = module.params['node_auth']
node_user = module.params['node_user']
node_pass = module.params['node_pass']
if node_user:
params = [('node.session.auth.authmethod', node_auth), ('node.session.auth.username', node_user), ('node.session.auth.password', node_pass)]
for (name, value) in params:
cmd = ('%s --mode node --targetname %s --op=update --name %s --value %s' % (iscsiadm_cmd, target, name, value))
(rc, out, err) = module.run_command(cmd)
if (rc > 0):
module.fail_json(cmd=cmd, rc=rc, msg=err)
cmd = ('%s --mode node --targetname %s --login' % (iscsiadm_cmd, target))
(rc, out, err) = module.run_command(cmd)
if (rc > 0):
module.fail_json(cmd=cmd, rc=rc, msg=err) | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
c2a810bd301d8844f561beed00989c879eb6d363 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02787/s385554448.py | eb0de844cf83ff51298317f61169298255d1ec6e | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | H,N = map(int,input().split())
inf = 1000000000  # sentinel: "not reachable yet"
# dp[j] = minimum total cost to accumulate exactly j points
# (presumably damage; H and N are read from stdin on the preceding line).
dp = [inf for _ in range(20001)]
magics = []
dp[0] = 0
for i in range(N):
    # each entry is [points gained, cost]
    magic = list(map(int,input().split()))
    magics.append(magic)
# Unbounded relaxation: each option may be used any number of times.
# Assumes H and every k[0] are <= 10000 so j+k[0] <= 20000 — TODO confirm
# against the problem constraints.
for j in range(10001):
    for k in magics:
        dp[j+k[0]] = min(dp[j]+k[1],dp[j+k[0]])
# Any total of at least H is acceptable, hence the tail minimum.
ans = dp[H:]
print(min(ans))
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
980d5b518857f99a7f02371be9dd4c0741d704b7 | e53b7bbcea1a6f06175a9f14e31d5725fe80e804 | /Question_100/Q11_MeanFilter.py | a8d91f259b3b5bae65693816b60b3a60b2e928ac | [] | no_license | Zpadger/ObjectDetection | 5777c8d78c71dca1af6bccf25b01288dca7100c3 | aa0193a38f3d5c3a318501c3a59e89b73d3e244b | refs/heads/master | 2020-08-16T02:58:45.412713 | 2019-12-14T08:18:51 | 2019-12-14T08:18:51 | 215,446,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 589 | py | #均值滤波
# Mean filter with zero padding (translated from the original Chinese
# comment: 均值滤波 = "mean filter").
import numpy as np


def mean_filter(img, K_size=3):
    """Apply a ``K_size`` x ``K_size`` mean filter with zero padding.

    Parameters
    ----------
    img : ndarray of shape (h, w, c)
        Input image (any numeric dtype).
    K_size : int
        Odd kernel size; the pad width is ``K_size // 2``.

    Returns
    -------
    ndarray of shape (h, w, c), dtype uint8
        The filtered image.
    """
    h, w, c = img.shape
    pad = K_size // 2
    # NOTE: the deprecated/removed ``np.float`` alias was replaced by the
    # builtin ``float`` (they were identical).
    out = np.zeros((h + pad * 2, w + pad * 2, c), dtype=float)
    out[pad: pad + h, pad: pad + w] = img.copy().astype(float)
    tmp = out.copy()
    for y in range(h):
        for x in range(w):
            # BUG FIX: the original reused ``c`` as the channel loop
            # variable, so the channel count shrank after every pixel and
            # filtering silently stopped after a few pixels.
            for ch in range(c):
                out[pad + y, pad + x, ch] = np.mean(tmp[y: y + K_size, x: x + K_size, ch])
    return out[pad: pad + h, pad: pad + w].astype(np.uint8)


if __name__ == "__main__":
    # OpenCV is only needed for the I/O demo, so it is imported here and
    # the filter itself stays importable without cv2 installed.
    import cv2

    img = cv2.imread("imori.jpg")
    out = mean_filter(img, K_size=3)
    # save result
    cv2.imwrite("out.jpg", out)
    cv2.imshow("result", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
| [
"noreply@github.com"
] | Zpadger.noreply@github.com |
9534e203e71c3a4144e688ecb9c1a247d5e1c057 | f7d28900c8b49748d7b31f3b8dd384042f07fb36 | /misc/glp_stats/collect_glyph_stats.py | 3fd8c44a1fbb977f719fa9b026e673da5d53f1e9 | [] | no_license | TeluguOCR/datagen_initio | 0f2f4823a08bca24a1012fbd0508cdf12ed01dc1 | f405e91f66c770efa6ae94a71430fcec6bae449f | refs/heads/master | 2021-01-10T15:12:45.431308 | 2015-12-06T00:24:02 | 2015-12-06T00:24:02 | 47,477,987 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,080 | py | #! /usr/bin/python
# -*- coding: utf-8 -*-
from PIL import Image
import sys, os, re
# Require the glyph-directory root as the single command-line argument.
if len(sys.argv) < 2:
    print('Usage: ' + sys.argv[0] + ' <Directory>/ \n'
          'This Program finds the stats of images of each glyph class'
          'Directory is location of the directories containing image files for each glyph')
    sys.exit()
# NOTE(review): the two usage strings above concatenate with no separating
# space or newline, so the printed message runs together.
dirs_dir = sys.argv[1]
# Normalise the path so later concatenations can assume a trailing slash.
if dirs_dir[-1] != '/':
    dirs_dir += '/'
# Akshar_IT_4004018_-5_-28_-4_-27_-3_-26_-6_-29
# Font_Style_ID_T_B_T_B*
def SplitFileName(filename):
    """Split ``<font>_<style>_<id>_<top>_<bot>[...].tif`` into its parts.

    Returns ``(font, style, dtbpairs)`` where ``dtbpairs`` is a list of
    ``(top, bottom)`` integer pairs parsed from the trailing ``_T_B``
    groups of the file name. On a ValueError while parsing the numbers
    the file name is printed and an empty pair list is returned.
    """
    m = re.match('(.+?)_(..)_.+?(_.+_.+).tif', filename)
    font = m.group(1)
    style = m.group(2)
    try:
        # BUG FIX: ``map`` returns an iterator on Python 3, which cannot be
        # indexed/len()-ed below; build a real list (identical on Python 2).
        dtbs = [int(t) for t in m.group(3).split('_')[1:]]
    except ValueError:
        print(filename)
        dtbs = []
    dtbpairs = [(dtbs[i], dtbs[i+1]) for i in range(0, len(dtbs), 2)]
    return font, style, dtbpairs
# Python 2 script body: walks every glyph directory, writes one CSV row of
# normalised metrics per image, and saves a per-glyph average image.
out_file = open('/tmp/' + dirs_dir[:-1].replace("/","_") + ".csv", 'w')
out_file.write("char font style wd ht xht normtop normbot normwd normht\n")
out_dir = '/tmp/avgs/'
if not os.path.exists(out_dir): os.makedirs(out_dir)
NMXHT = 16 # This is the normalised height of the letter x (or ja in Telugu)
NMTOP = int(1.1 * NMXHT)
NMBOT = int(1.3 * NMXHT)
NMWID = 5 * NMXHT
NMHIT = NMTOP + NMXHT + NMBOT
idir = 0
for dirpath, dirnames, filenames in os.walk(dirs_dir):
    print idir, dirpath
    idir += 1
    # Running average canvas for this glyph class.
    big_im = Image.new("L", (NMWID, NMHIT), "white")
    big_im.load()
    # Glyph label = directory name.
    char = os.path.basename(dirpath)
    nimgs = 0
    for filename in filenames:
        # Sanity Checks and open
        if filename[-4:] != '.tif':
            print filename
            continue
        # NOTE(review): os.path.join cannot raise NameError here; this
        # try/except looks like leftover debugging scaffolding.
        try:
            full_path = os.path.join(dirpath, filename)
        except NameError:
            print dirpath, filename
            raise
        # Open image and process
        im = Image.open(full_path)
        wd, ht = im.size
        font, style, dtbpairs = SplitFileName(filename)
        # Only the FIRST (top, bottom) pair is used — note the break below.
        for dt, db in dtbpairs:
            xht = dt + ht - db
            scalef = float(NMXHT)/xht
            normtop = int(scalef * dt)
            normbot = int(scalef * db) + NMXHT
            normwd = int(scalef * wd)
            normht = int(scalef * ht)
            # Write the stats to a file
            line = " ".join(map(str, (char, font, style, wd, ht, xht, normtop, normbot, normwd, normht)))
            out_file.write(line+"\n")
            break
        # Scale and blend to get average
        # NOTE(review): if dtbpairs is empty (parse failure), normtop/normwd/
        # normht below are stale from the previous file, or undefined on the
        # first file — verify this can't happen for real inputs.
        #print nimgs
        # ``except: raise`` is a no-op wrapper; kept as-is to preserve behavior.
        try:
            nimgs = nimgs + 1
            im.load()
            im = im.convert('L')
            im = im.resize((normwd, normht))
            im2 = Image.new("L", (NMWID, NMHIT), "white")
            im2.load()
            im2.paste(im, (0, NMTOP + normtop))
            im2.load()
            # Incremental running average: weight 1/nimgs for the new image.
            big_im = Image.blend(big_im, im2, 1./nimgs)
        except:
            raise
        print char, nimgs, big_im.size, im2.size
        continue
    # Best-effort save; bare except deliberately swallows failures
    # (e.g. directories with no images).
    try:
        big_im.save(out_dir + char + '.tif', 'TIFF')
    except:
        pass
out_file.close()
| [
"rakeshvar@gmail.com"
] | rakeshvar@gmail.com |
0ffce78daacdd1e2459e140c917feb0bfcac0095 | bd71b063f13958e07c9e16cd171d3fc0e1c58e4d | /0x0F-python-object_relational_mapping/11-model_state_insert.py | 293b9e47a1719bc9fa3f2594d981cfa01588d16b | [] | no_license | feliciahsieh/holbertonschool-higher_level_programming | 2aecd291f85fe69ab11331bb2d5372c6d67e1af6 | 017e8b87f9d8967b55ccc68ed30921572d4ddb65 | refs/heads/master | 2021-01-01T20:46:36.901665 | 2019-04-10T18:24:59 | 2019-04-10T18:24:59 | 98,931,138 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 774 | py | #!/usr/bin/python3
"""
Add the State object Louisiana to database hbtn_0e_6_usa
"""
import sys
from model_state import Base, State
from sqlalchemy import create_engine
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
if __name__ == "__main__":
    # NOTE: rebinds the ``Base`` imported from model_state; the model keeps
    # its own metadata, so this local declarative base goes unused.
    Base = declarative_base()
    # Connection URL built from the CLI args: user, password, database.
    db_url = 'mysql+mysqldb://{}:{}@localhost/{}'.format(
        sys.argv[1], sys.argv[2], sys.argv[3])
    engine = create_engine(db_url)
    session = sessionmaker(bind=engine)()
    # Insert the new state and persist it.
    new_state = State(name='Louisiana')
    session.add(new_state)
    session.commit()
    # Read it back to obtain the auto-assigned primary key.
    inserted = session.query(State).filter(State.name == 'Louisiana').first()
    print("{}".format(inserted.id))
    session.close()
| [
"felicia@tixwriteoff.com"
] | felicia@tixwriteoff.com |
3c44f5b036a333f3e5fc9e2804a38f9c2fdd069b | 061684e59ba5c816419f763a25629af987f60d52 | /CUDATest/test_code/__init__.py | e5225e0b12cc6e585d90b50fe277e7cdd23f4c17 | [] | no_license | wangyouan/PythonTest | 8d798fc5cde3ecaeb64301c3290fe51ea8577523 | 62177829b81e918cadb4a24527c4cdcaff734d7d | refs/heads/master | 2021-06-17T11:18:11.973935 | 2017-03-26T07:07:18 | 2017-03-26T07:07:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 126 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Project: PythonTest
# File name: __init__.py
# Author: warn
# Date: 2017/3/25 | [
"wangyouan0629@hotmail.com"
] | wangyouan0629@hotmail.com |
a1df8dd1d107a12098b59d231076994ca9958a2d | 16807220b95bf9a559b97ec0de16665ff31823cb | /lcd/screens/exitscreen.py | 52556eede378c20732b65cfc92504200ed9657d1 | [
"BSD-3-Clause"
] | permissive | cuauv/software | 7263df296e01710cb414d340d8807d773c3d8e23 | 5ad4d52d603f81a7f254f365d9b0fe636d03a260 | refs/heads/master | 2021-12-22T07:54:02.002091 | 2021-11-18T01:26:12 | 2021-11-18T02:37:55 | 46,245,987 | 76 | 34 | null | 2016-08-03T05:31:00 | 2015-11-16T02:02:36 | C++ | UTF-8 | Python | false | false | 506 | py | import cairo
from screen import Screen
class ExitScreen(Screen):
    # Farewell splash shown while the system halts.
    def get_name(self):
        """Return this screen's registry name."""
        return "exit"
    def draw(self, cr):
        """Render the goodbye message onto the cairo context ``cr``.

        The cairo calls are order-dependent: each ``set_*`` call applies
        to the ``show_text`` that follows it.
        """
        # Large red "Bye!" headline.
        cr.set_source_rgb(1.0, 0, 0)
        cr.select_font_face("FreeSans", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL)
        cr.set_font_size(150)
        cr.move_to(10, 150)
        cr.show_text("Bye!")
        # Smaller white subtitle underneath.
        cr.set_source_rgb(1.0, 1.0, 1.0)
        cr.set_font_size(25)
        cr.move_to(15, 230)
        cr.show_text("Please wait for system halt.")
| [
"leader@cuauv.org"
] | leader@cuauv.org |
32661b4325599ee456d1c452634fb18c9f48db6e | 62a5beed83b968fb5b2082a453744bb0fe79f3f2 | /ch04/negative_sampling_layer.py | 4bef31b3c270418dcf048aabbbadfc7483bdc585 | [] | no_license | show2214/deep-learning-from-scratch-2 | 05c8515f7c00947387661a05005f2fd00cb0543b | 2deb28e68d6e0281aebf2df03c619299591d0660 | refs/heads/master | 2023-08-09T12:40:26.877054 | 2021-09-15T04:43:42 | 2021-09-15T04:43:42 | 403,802,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,402 | py | import sys
sys.path.append('..')
from common.np import * # import numpy as np
from common.layers import Embedding, SigmoidWithLoss
import collections
class EmbeddingDot:
    """Embedding lookup followed by a row-wise dot product with ``h``."""
    def __init__(self, W):
        self.embed = Embedding(W)
        self.params = self.embed.params
        self.grads = self.embed.grads
        self.cache = None
    def forward(self, h, idx):
        """Return ``dot(W[idx[b]], h[b])`` for each batch row ``b``."""
        rows = self.embed.forward(idx)
        out = (rows * h).sum(axis=1)
        self.cache = (h, rows)
        return out
    def backward(self, dout):
        """Propagate ``dout`` to the embedding and return ``dh``."""
        h, rows = self.cache
        # Broadcast the per-row scalar gradient across the feature axis.
        dout = dout.reshape(dout.shape[0], 1)
        self.embed.backward(dout * h)
        return dout * rows
class UnigramSampler:
    """Draws negative word ids from the smoothed unigram distribution."""
    def __init__(self, corpus, power, sample_size):
        self.sample_size = sample_size
        counts = collections.Counter(corpus)
        self.vocab_size = len(counts)
        # Smoothed distribution: count ** power, normalised to sum to 1.
        probs = np.zeros(self.vocab_size)
        for wid in range(self.vocab_size):
            probs[wid] = counts[wid]
        probs = np.power(probs, power)
        self.word_p = probs / np.sum(probs)
    def get_negative_sample(self, target):
        """Sample ``sample_size`` negative ids per row of ``target``."""
        batch_size = target.shape[0]
        if GPU:
            # GPU path: sampling with replacement and without excluding the
            # positive target (a speed-over-exactness trade-off).
            return np.random.choice(
                self.vocab_size, size=(batch_size, self.sample_size),
                replace=True, p=self.word_p)
        sampled = np.zeros((batch_size, self.sample_size), dtype=np.int32)
        for row in range(batch_size):
            # Zero out the positive target so it is never drawn, then
            # renormalise before sampling without replacement.
            p = self.word_p.copy()
            p[target[row]] = 0
            p /= p.sum()
            sampled[row, :] = np.random.choice(
                self.vocab_size, size=self.sample_size, replace=False, p=p)
        return sampled
class NegativeSamplingLoss:
    """Output loss that scores one positive target and ``sample_size``
    negatively sampled targets through sigmoid-with-loss layers."""
    def __init__(self, W, corpus, power=0.75, sample_size=5):
        self.sample_size = sample_size
        self.sampler = UnigramSampler(corpus, power, sample_size)
        # One (embed-dot, loss) pair for the positive target plus one per
        # negative sample; index 0 is always the positive pair.
        n_layers = sample_size + 1
        self.loss_layers = [SigmoidWithLoss() for _ in range(n_layers)]
        self.embed_dot_layers = [EmbeddingDot(W) for _ in range(n_layers)]
        self.params = []
        self.grads = []
        for layer in self.embed_dot_layers:
            self.params.extend(layer.params)
            self.grads.extend(layer.grads)
    def forward(self, h, target):
        """Return the summed positive + negative sampling loss."""
        batch_size = target.shape[0]
        negatives = self.sampler.get_negative_sample(target)
        # Positive example: label 1.
        pos_score = self.embed_dot_layers[0].forward(h, target)
        loss = self.loss_layers[0].forward(
            pos_score, np.ones(batch_size, dtype=np.int32))
        # Negative examples: label 0.
        neg_label = np.zeros(batch_size, dtype=np.int32)
        pairs = zip(self.embed_dot_layers[1:], self.loss_layers[1:])
        for k, (embed_dot, sig_loss) in enumerate(pairs):
            neg_score = embed_dot.forward(h, negatives[:, k])
            loss += sig_loss.forward(neg_score, neg_label)
        return loss
    def backward(self, dout=1):
        """Accumulate ``dh`` from every (loss, embed-dot) pair."""
        dh = 0
        for sig_loss, embed_dot in zip(self.loss_layers, self.embed_dot_layers):
            dh += embed_dot.backward(sig_loss.backward(dout))
        return dh
"show2214@icloud.com"
] | show2214@icloud.com |
6cf60993a32a3c90768425762696112fcbe27ec0 | 95b8130d908c79f8192c6813fee6220ccb05c5c3 | /tests/test_stumped.py | c4744d728110eaecf1b9f334aa45e69bec1187bc | [
"BSD-3-Clause"
] | permissive | canslove/stumpy | 4ac96d44eeef24f8e2add3dd0bab0788cb117d32 | 2c1a9ace0d7241435d1e5c6578e7dca45e541108 | refs/heads/master | 2020-06-01T06:16:22.238904 | 2019-06-06T18:50:22 | 2019-06-06T18:50:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,692 | py | import numpy as np
import numpy.testing as npt
import pandas as pd
from stumpy import stumped, core
from dask.distributed import Client, LocalCluster
import pytest
import warnings
@pytest.fixture(scope="module")
def dask_client():
    """Module-scoped Dask client backed by a local cluster.

    Yields a connected client; the code after ``yield`` runs at module
    teardown and releases both the client and the cluster.
    """
    cluster = LocalCluster(n_workers=None, threads_per_worker=2)
    client = Client(cluster)
    yield client
    # teardown: close the client first, then its cluster
    client.close()
    cluster.close()
def naive_mass(Q, T, m, trivial_idx=None, excl_zone=0, ignore_trivial=False):
    """Brute-force matrix profile entry for query ``Q`` against ``T``.

    Returns ``(P, I, IL, IR)``: the nearest-neighbor z-normalized
    distance, its index, and the left/right nearest-neighbor indices for
    self-joins (-1 when unavailable or inside the exclusion zone).
    """
    D = np.linalg.norm(
        core.z_norm(core.rolling_window(T, m), 1) - core.z_norm(Q), axis=1
    )
    if ignore_trivial:
        # Mask the trivial-match exclusion zone around trivial_idx.
        start = max(0, trivial_idx - excl_zone)
        stop = min(T.shape[0] - Q.shape[0] + 1, trivial_idx + excl_zone)
        D[start:stop] = np.inf
    I = np.argmin(D)
    P = D[I]

    def first_min(indices):
        # First index achieving the minimum over ``indices`` (-1 if all
        # are inf), discarded if it falls inside the exclusion zone.
        best = np.inf
        best_idx = -1
        for j in indices:
            if D[j] < best:
                best = D[j]
                best_idx = j
        if start <= best_idx <= stop:
            return -1
        return best_idx

    # Left and right matrix profile indices apply to self-joins only.
    if ignore_trivial and trivial_idx > 0:
        IL = first_min(range(trivial_idx))
    else:
        IL = -1
    if ignore_trivial and trivial_idx + 1 < D.shape[0]:
        IR = first_min(range(trivial_idx + 1, D.shape[0]))
    else:
        IR = -1
    return P, I, IL, IR
def replace_inf(x, value=0):
    """Replace every +inf/-inf entry of ``x`` in place with ``value``."""
    x[np.isinf(x)] = value
    return
# Parametrized (T_A, T_B) pairs for the tests below: one fixed pair and
# one random pair. NOTE: the random arrays are generated at import time,
# so their values differ between test runs.
test_data = [
    (
        np.array([9, 8100, -60, 7], dtype=np.float64),
        np.array([584, -11, 23, 79, 1001, 0, -19], dtype=np.float64),
    ),
    (
        np.random.uniform(-1000, 1000, [8]).astype(np.float64),
        np.random.uniform(-1000, 1000, [64]).astype(np.float64),
    ),
]
@pytest.mark.filterwarnings("ignore:numpy.dtype size changed")
@pytest.mark.filterwarnings("ignore:numpy.ufunc size changed")
@pytest.mark.filterwarnings("ignore:numpy.ndarray size changed")
@pytest.mark.filterwarnings("ignore:\\s+Port 8787 is already in use:UserWarning")
@pytest.mark.parametrize("T_A, T_B", test_data)
def test_stumped_self_join(T_A, T_B, dask_client):
    """stumped self-join must match the brute-force matrix profile,
    both for a plain ndarray and for a pandas Series input."""
    dask_client.restart()
    m = 3
    zone = int(np.ceil(m / 4))
    naive_mp = np.array(
        [naive_mass(Q, T_B, m, i, zone, True)
         for i, Q in enumerate(core.rolling_window(T_B, m))],
        dtype=object,
    )
    replace_inf(naive_mp)
    comp_mp = stumped(dask_client, T_B, m, ignore_trivial=True)
    replace_inf(comp_mp)
    npt.assert_almost_equal(naive_mp, comp_mp)
    dask_client.restart()
    # Same comparison with a pandas Series input.
    comp_mp = stumped(dask_client, pd.Series(T_B), m, ignore_trivial=True)
    replace_inf(comp_mp)
    npt.assert_almost_equal(naive_mp, comp_mp)
    dask_client.restart()
@pytest.mark.filterwarnings("ignore:numpy.dtype size changed")
@pytest.mark.filterwarnings("ignore:numpy.ufunc size changed")
@pytest.mark.filterwarnings("ignore:numpy.ndarray size changed")
@pytest.mark.filterwarnings("ignore:\\s+Port 8787 is already in use:UserWarning")
@pytest.mark.parametrize("T_A, T_B", test_data)
def test_stumped_A_B_join(T_A, T_B, dask_client):
    """stumped A-B join must match the brute-force result, both for
    plain ndarrays and for pandas Series inputs."""
    dask_client.restart()
    m = 3
    naive_mp = np.array(
        [naive_mass(Q, T_A, m) for Q in core.rolling_window(T_B, m)],
        dtype=object,
    )
    replace_inf(naive_mp)
    comp_mp = stumped(dask_client, T_A, m, T_B, ignore_trivial=False)
    replace_inf(comp_mp)
    npt.assert_almost_equal(naive_mp, comp_mp)
    dask_client.restart()
    # Same comparison with pandas Series inputs.
    comp_mp = stumped(
        dask_client, pd.Series(T_A), m, pd.Series(T_B), ignore_trivial=False
    )
    replace_inf(comp_mp)
    npt.assert_almost_equal(naive_mp, comp_mp)
    dask_client.restart()
| [
"seanmylaw@gmail.com"
] | seanmylaw@gmail.com |
78fb6dfd977597e0296dba31f5b7f924af76d0ee | 32997e6a8607358765254ea81d2f867269ae2b35 | /09-problems/ed_002_max_cpu_load.py | d7ce673b607b109f2ba41cecf865ebb19e7f70fa | [
"MIT"
] | permissive | aman-singh7/training.computerscience.algorithms-datastructures | 0ace578ebcec13c5293b4d4dccdaa7634788604d | a4e1d1973b091589690fd2efc5dcb3c1a4df6c4c | refs/heads/master | 2023-06-09T12:27:55.569254 | 2021-06-29T20:16:37 | 2021-06-29T20:16:37 | 401,133,325 | 1 | 0 | MIT | 2021-08-29T20:12:50 | 2021-08-29T20:12:49 | null | UTF-8 | Python | false | false | 2,706 | py | """
1. Problem Summary / Clarifications / TDD:
[[1,4,3], [2,5,4], [7,9,6]]: 7
[[6,7,10], [2,4,11], [8,12,15]]: 15
[[1,4,2], [2,4,1], [3,6,5]]: 8
Output: 8
2. Intuition:
1. Store store current end time and current load
2. Compute current load: current load + curr_job.cpu_load - all previous job cpu load which job.end < curr_job.start
3. Compute the max cpu load
3. Implementation:
4. Tests:
Edge case 1: []: 0
Edge case 2: [[0,2,3]]: 3
Edge case 3: [[0,2,3],[0,2,3]]: 6
Spacial case: [[0,20,3],[1,21,3],[2,22,3],[3,23,3]]: 12
Cases above
5: Complexity Analysis:
Time Complexity: O(nlogn) because of the sorting and heappush/heappop
Space Complexity: O(n) when max(jobs.start.values) < min(jobs.end.values)
"""
import heapq
class Solution:
def __init__(self):
self._start = 0
self._end = 1
self._cpu_load = 2
def find_max_cpu_load(self, jobs):
# 1. Sort all job by job start time
jobs.sort(key=lambda job: job[self._start])
job_end_time_heap = []
# 2. Compute cpu max load
cpu_max_load = 0
cpu_curr_load = 0
for job in jobs:
# 2.1. Deduce all previous job cpu loads
while job_end_time_heap and job[self._start] > job_end_time_heap[0][0]:
cpu_curr_load -= job_end_time_heap[0][1]
heapq.heappop(job_end_time_heap)
# 2.2. Add current job cpu load
cpu_curr_load += job[self._cpu_load]
# 2.3. Push current job cpu load
heapq.heappush(job_end_time_heap, (job[self._end], job[self._cpu_load]))
cpu_max_load = max(cpu_max_load, cpu_curr_load)
return cpu_max_load
if __name__ == '__main__':
max_cpu_load_solution = Solution()
# Edge Cases:
print('[]: ', max_cpu_load_solution.find_max_cpu_load([]))
print('[[0,2,3]]: ', max_cpu_load_solution.find_max_cpu_load([[0,2,3]]))
print('[[0,2,3],[0,2,3]]: ', max_cpu_load_solution.find_max_cpu_load([[0,2,3],[0,2,3]]))
# Spacial Cases:
print('[[0,20,3],[1,21,3],[2,22,3],[3,23,3]]: ', max_cpu_load_solution.find_max_cpu_load([[0,20,3],[1,21,3],[2,22,3],[3,23,3]]))
# Test Cases:
print('[[1,4,3],[2,5,4],[7,9,6]]: ', max_cpu_load_solution.find_max_cpu_load([[1,4,3],[2,5,4],[7,9,6]]))
print('[[6,7,10],[2,4,11],[8,12,15]]: ', max_cpu_load_solution.find_max_cpu_load([[6,7,10],[2,4,11],[8,12,15]]))
print('[[1,4,2],[2,4,1],[3,6,5]]: ', max_cpu_load_solution.find_max_cpu_load([[1,4,2],[2,4,1],[3,6,5]]))
| [
"mamid1706@hotmail.fr"
] | mamid1706@hotmail.fr |
19eeeb6f65f752d9650f12b7db8cbd7fd4e52021 | 0fac73e70eeb8e3b8635de8a4eaba1197cd42641 | /shop/migrations/0009_auto_20161218_1423.py | 6c991c55ff7db73b3fcce1e6b9e86531343964e6 | [] | no_license | gauraviit1/myshop_aws | 0e6c9d822cbbc6505eb7c7a71654d34591e7b168 | 261b296d79cfdf8fa4cb9105b4e2fe70e864f6a6 | refs/heads/master | 2021-01-19T13:44:12.977253 | 2017-03-03T07:52:58 | 2017-03-03T07:52:58 | 82,444,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 646 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-12-18 08:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated field alterations for the ``productattribute`` model."""
    dependencies = [
        ('shop', '0008_auto_20161218_1418'),
    ]
    operations = [
        migrations.AlterField(
            model_name='productattribute',
            name='size',
            field=models.CharField(blank=True, max_length=2),
        ),
        # NOTE(review): blank=True without null=True on an integer field
        # lets forms accept an empty value that the database column may
        # reject — confirm the model intends NOT NULL here.
        migrations.AlterField(
            model_name='productattribute',
            name='waist_size',
            field=models.PositiveSmallIntegerField(blank=True),
        ),
    ]
| [
"mcjail.shi.hp@gmail.com"
] | mcjail.shi.hp@gmail.com |
126fdf5a0ff5d0a3257837f6a8cb63753004fcc4 | ff6f60d02ed8d024f7b2db5c9eb4b1196ebf166b | /wb_opencv/cameo/utils.py | 871dbd9788f5c3889f123abab833578679012be5 | [] | no_license | cekong/learnit | 43b707e347ff552754b6592e01dd106c98cd0cc5 | b4111d6fee95960f7b7ca5421b7159cb6122ad2a | refs/heads/master | 2020-03-25T13:53:37.848843 | 2019-08-29T06:46:48 | 2019-08-29T06:46:48 | 143,848,485 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57 | py | import cv2
import numpy
import scipy.interpolate #插值 | [
"noreply@github.com"
] | cekong.noreply@github.com |
cf7a6073c70b6641ce1642c80b71357c98691c98 | 441f0b4b4f2016ace7bed37431779b3352b9c2e4 | /YouTube Ignorancia Zero/Ferramentas de Sistema/105 - Ferramentas de Sistema I: Básico módulo sys/105.py | 00f7f36c467e269506283635dc65f9c6e2a2bbff | [] | no_license | AlexGalhardo/Learning-Python | 936b2eae814d148b0b3b77cc76cf81b45fbb4a02 | b710952101a0409f585ba975e2854bf0e0286ac7 | refs/heads/master | 2020-05-19T23:32:49.285710 | 2019-09-04T17:37:27 | 2019-09-04T17:37:27 | 134,312,273 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 424 | py |
##### Modulo sys #####
"""
platform = devolve a plataforma de execução
path = lista com todas as pastas ligadas ao programa
exit([args]) = termina a execução de um programa
modules = todos os módulos carregados
exc_info = tupla que contem a ultima excessão levantada
"""
import sys
#if 'win' in sys.platform:
# import winsound
#print(sys.modules)
#try:
# raise IndexError
#except:
# print(sys.exc_info())
| [
"aleexgvieira@gmail.com"
] | aleexgvieira@gmail.com |
85c31e40b493df1d537fa5c1d68f81571561acf1 | 6cc35793f2bac664d2ab9273911b37a256933418 | /Aula18/1.py | 8a83b57ae03116a3b36484bd55697b3268cfeffc | [] | no_license | MarcianoPazinatto/TrabalhosdePython | a6d8384b3586d5619654a70c73a978ce9def9b8a | 74ccf0541de426ad795700234643218953d7b0a0 | refs/heads/master | 2020-09-05T21:28:13.264266 | 2020-03-10T13:40:48 | 2020-03-10T13:40:48 | 220,218,856 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,749 | py | # Aula 18 - 03-11-2019
# Exercicios para lista simples
# Dada a seguinte lista, resolva os seguintes questões:
# (List-indexing exercises; each print pair shows the prompt then the answer.)
lista = [10, 20, 'amor', 'abacaxi', 80, 'Abioluz', 'Cachorro grande é de arrasar']
print('1: Usando a indexação, escreva na tela a palavra abacaxi')
print(lista[3])
##################################################################################
print('\n\n')
print('2: Usando a indexação, escreva na tela os seguintes dados: 20, amor, abacaxi')
print(lista[1:4])
##################################################################################
print('\n\n')
print('3: Usando a indexação, escreva na tela uma lista com dados de 20 até Abioluz')
print(lista[1:6])
##################################################################################
print('\n\n')
print('4: Usando a indexação, escreva na tela uma lista com os seguintes dados:'
      '\nCachorro grande é de arrasar, Abioluz, 80, abacaxi, amor, 20, 10')
print(lista[::-1])
##################################################################################
print('\n\n')
print('5: Usando o f-string e a indexação escreva na tela os seguintes dados:'
      '\n { abacaxi } é muito bom, sinto muito { amor } quando eu chupo { 80 }" deles.')
print(f'{ lista[3]} é muito bom, sinto muito { lista[2] } quando eu chupo { lista[4]} deles.')
##################################################################################
print('\n\n')
print('6: Usando a indexação, escreva na tela os seguintes dados:'
      '\n10, amor, 80, Cachorro grande é de arrasar')
print(lista[::2])
##################################################################################
print('\n\n')
print('7: Usando o f-string e a indexação escreva na tela os seguintes dados:'
      'Abioluz - abacaxi - 10 - Cachorro grande é de arrasar - 20 - 80')
print(f'{lista[5]}-{lista[3]}-{lista[0]}-{lista[6]}-{lista[1]}-{lista[4]}')
##################################################################################
print('\n\n')
print('8: Usando o f-string e a indexação escreva na tela os seguintes dados:'
      '\namor - 10 - 10 - abacaxi - Cachorro grande é de arrasar - Abioluz - 10 - 20')
print(f'{lista[2]}-{lista[0]}-{lista[0]}-{lista[3]}-{lista[6]}-{lista[5]}-{lista[0]}-{lista[1]}')
##################################################################################
print('\n\n')
print('9: Usando a indexação, escreva na tela uma lista com dados de 10 até 80')
# BUG FIX: 80 sits at index 4, so the slice must stop at 5 to include it
# (exercise 3 uses the same inclusive convention); lista[0:4] dropped 80.
print(lista[0:5])
##################################################################################
print('\n\n')
print('10: Usando a indexação, escreva na tela os seguintes dados:'
      '\n10, abacaxi, Cachorro grande é de arrasar')
print(lista[::3])
"marciano.et@hotmail.com"
] | marciano.et@hotmail.com |
3873bba1c404a7e7984c0597b55e018dc11f41f4 | 0bb8e1d97434d079d02f2645b54a4489bee91264 | /openpyxl2/drawing/tests/test_properties.py | 4823cbedbad3cc0ed424805f97df27bcf1649e43 | [
"MIT"
] | permissive | j5int/openpyxl2 | 1313dba978179161acfc005e147ed7eed34c249a | 3c82567c33d6cad5b0b26eea97da7bb39ba7f4c8 | refs/heads/master | 2020-04-05T12:50:02.977837 | 2018-11-09T11:55:31 | 2018-11-09T11:55:31 | 156,882,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,858 | py | from __future__ import absolute_import
# Copyright (c) 2010-2018 openpyxl
import pytest
from openpyxl2.xml.functions import fromstring, tostring
from openpyxl2.tests.helper import compare_xml
@pytest.fixture
def NonVisualDrawingProps():
from ..properties import NonVisualDrawingProps
return NonVisualDrawingProps
class TestNonVisualDrawingProps:
def test_ctor(self, NonVisualDrawingProps):
graphic = NonVisualDrawingProps(id=2, name="Chart 1")
xml = tostring(graphic.to_tree())
expected = """
<cNvPr id="2" name="Chart 1"></cNvPr>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, NonVisualDrawingProps):
src = """
<cNvPr id="3" name="Chart 2"></cNvPr>
"""
node = fromstring(src)
graphic = NonVisualDrawingProps.from_tree(node)
assert graphic == NonVisualDrawingProps(id=3, name="Chart 2")
@pytest.fixture
def NonVisualGroupDrawingShapeProps():
from ..properties import NonVisualGroupDrawingShapeProps
return NonVisualGroupDrawingShapeProps
class TestNonVisualGroupDrawingShapeProps:
def test_ctor(self, NonVisualGroupDrawingShapeProps):
props = NonVisualGroupDrawingShapeProps()
xml = tostring(props.to_tree())
expected = """
<cNvGrpSpPr />
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, NonVisualGroupDrawingShapeProps):
src = """
<cNvGrpSpPr />
"""
node = fromstring(src)
props = NonVisualGroupDrawingShapeProps.from_tree(node)
assert props == NonVisualGroupDrawingShapeProps()
@pytest.fixture
def NonVisualGroupShape():
from ..properties import NonVisualGroupShape
return NonVisualGroupShape
class TestNonVisualGroupShape:
def test_ctor(self, NonVisualGroupShape, NonVisualDrawingProps, NonVisualGroupDrawingShapeProps):
props = NonVisualGroupShape(
cNvPr=NonVisualDrawingProps(id=2208, name="Group 1"),
cNvGrpSpPr=NonVisualGroupDrawingShapeProps()
)
xml = tostring(props.to_tree())
expected = """
<nvGrpSpPr>
<cNvPr id="2208" name="Group 1" />
<cNvGrpSpPr />
</nvGrpSpPr>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, NonVisualGroupShape, NonVisualDrawingProps, NonVisualGroupDrawingShapeProps):
src = """
<nvGrpSpPr>
<cNvPr id="2208" name="Group 1" />
<cNvGrpSpPr />
</nvGrpSpPr>
"""
node = fromstring(src)
props = NonVisualGroupShape.from_tree(node)
assert props == NonVisualGroupShape(
cNvPr=NonVisualDrawingProps(id=2208, name="Group 1"),
cNvGrpSpPr=NonVisualGroupDrawingShapeProps()
)
@pytest.fixture
def GroupLocking():
from ..properties import GroupLocking
return GroupLocking
class TestGroupLocking:
def test_ctor(self, GroupLocking):
lock = GroupLocking()
xml = tostring(lock.to_tree())
expected = """
<grpSpLocks xmlns="http://schemas.openxmlformats.org/drawingml/2006/main" />
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, GroupLocking):
src = """
<grpSpLocks />
"""
node = fromstring(src)
lock = GroupLocking.from_tree(node)
assert lock == GroupLocking()
@pytest.fixture
def GroupShapeProperties():
from ..properties import GroupShapeProperties
return GroupShapeProperties
from ..geometry import Point2D, PositiveSize2D, GroupTransform2D
class TestGroupShapeProperties:
    """Round-trip serialisation tests for GroupShapeProperties."""

    def test_ctor(self, GroupShapeProperties):
        """Properties with an explicit transform serialise to the expected XML."""
        transform = GroupTransform2D(
            off=Point2D(x=2222500, y=0),
            ext=PositiveSize2D(cx=2806700, cy=825500),
            chOff=Point2D(x=303, y=0),
            chExt=PositiveSize2D(cx=321, cy=111),
        )
        generated = tostring(GroupShapeProperties(bwMode="auto", xfrm=transform).to_tree())
        expected = """
<grpSpPr bwMode="auto" xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main">
<a:xfrm rot="0">
<a:off x="2222500" y="0"/>
<a:ext cx="2806700" cy="825500"/>
<a:chOff x="303" y="0"/>
<a:chExt cx="321" cy="111"/>
</a:xfrm>
</grpSpPr>
"""
        delta = compare_xml(generated, expected)
        assert delta is None, delta

    def test_from_xml(self, GroupShapeProperties):
        """Parsing an empty element yields a default instance."""
        markup = """
<grpSpPr />
"""
        parsed = GroupShapeProperties.from_tree(fromstring(markup))
        assert parsed == GroupShapeProperties()
| [
"charlie.clark@clark-consulting.eu"
] | charlie.clark@clark-consulting.eu |
ba09c36cf81e209d81983e69e414451144e1d77c | 528def9844f2ce13e6a358938b0b560945ab2248 | /cmibs/cisco_vtp_mib.py | e9b97bbfd0e0a75b417020b9976cc87b709cd405 | [
"BSD-3-Clause"
] | permissive | skripkar/noc | 055afbd42ab4c447d05d2cde0a822916f9e0844e | df193b99e478fe39157c8d27ff4098262d9cb734 | refs/heads/master | 2020-04-10T12:53:09.602779 | 2018-12-08T07:50:30 | 2018-12-08T07:50:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,021 | py | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# CISCO-VTP-MIB
# Compiled MIB
# Do not modify this file directly
# Run ./noc mib make_cmib instead
# ----------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# MIB Name
NAME = "CISCO-VTP-MIB"
# Metadata
LAST_UPDATED = "2010-05-12"
COMPILED = "2018-06-09"
# MIB Data: name -> oid
MIB = {
"CISCO-VTP-MIB::ciscoVtpMIB": "1.3.6.1.4.1.9.9.46",
"CISCO-VTP-MIB::vtpMIBObjects": "1.3.6.1.4.1.9.9.46.1",
"CISCO-VTP-MIB::vtpStatus": "1.3.6.1.4.1.9.9.46.1.1",
"CISCO-VTP-MIB::vtpVersion": "1.3.6.1.4.1.9.9.46.1.1.1",
"CISCO-VTP-MIB::vtpMaxVlanStorage": "1.3.6.1.4.1.9.9.46.1.1.2",
"CISCO-VTP-MIB::vtpNotificationsEnabled": "1.3.6.1.4.1.9.9.46.1.1.3",
"CISCO-VTP-MIB::vtpVlanCreatedNotifEnabled": "1.3.6.1.4.1.9.9.46.1.1.4",
"CISCO-VTP-MIB::vtpVlanDeletedNotifEnabled": "1.3.6.1.4.1.9.9.46.1.1.5",
"CISCO-VTP-MIB::vlanManagementDomains": "1.3.6.1.4.1.9.9.46.1.2",
"CISCO-VTP-MIB::managementDomainTable": "1.3.6.1.4.1.9.9.46.1.2.1",
"CISCO-VTP-MIB::managementDomainEntry": "1.3.6.1.4.1.9.9.46.1.2.1.1",
"CISCO-VTP-MIB::managementDomainIndex": "1.3.6.1.4.1.9.9.46.1.2.1.1.1",
"CISCO-VTP-MIB::managementDomainName": "1.3.6.1.4.1.9.9.46.1.2.1.1.2",
"CISCO-VTP-MIB::managementDomainLocalMode": "1.3.6.1.4.1.9.9.46.1.2.1.1.3",
"CISCO-VTP-MIB::managementDomainConfigRevNumber": "1.3.6.1.4.1.9.9.46.1.2.1.1.4",
"CISCO-VTP-MIB::managementDomainLastUpdater": "1.3.6.1.4.1.9.9.46.1.2.1.1.5",
"CISCO-VTP-MIB::managementDomainLastChange": "1.3.6.1.4.1.9.9.46.1.2.1.1.6",
"CISCO-VTP-MIB::managementDomainRowStatus": "1.3.6.1.4.1.9.9.46.1.2.1.1.7",
"CISCO-VTP-MIB::managementDomainTftpServer": "1.3.6.1.4.1.9.9.46.1.2.1.1.8",
"CISCO-VTP-MIB::managementDomainTftpPathname": "1.3.6.1.4.1.9.9.46.1.2.1.1.9",
"CISCO-VTP-MIB::managementDomainPruningState": "1.3.6.1.4.1.9.9.46.1.2.1.1.10",
"CISCO-VTP-MIB::managementDomainVersionInUse": "1.3.6.1.4.1.9.9.46.1.2.1.1.11",
"CISCO-VTP-MIB::managementDomainPruningStateOper": "1.3.6.1.4.1.9.9.46.1.2.1.1.12",
"CISCO-VTP-MIB::vlanInfo": "1.3.6.1.4.1.9.9.46.1.3",
"CISCO-VTP-MIB::vtpVlanTable": "1.3.6.1.4.1.9.9.46.1.3.1",
"CISCO-VTP-MIB::vtpVlanEntry": "1.3.6.1.4.1.9.9.46.1.3.1.1",
"CISCO-VTP-MIB::vtpVlanIndex": "1.3.6.1.4.1.9.9.46.1.3.1.1.1",
"CISCO-VTP-MIB::vtpVlanState": "1.3.6.1.4.1.9.9.46.1.3.1.1.2",
"CISCO-VTP-MIB::vtpVlanType": "1.3.6.1.4.1.9.9.46.1.3.1.1.3",
"CISCO-VTP-MIB::vtpVlanName": "1.3.6.1.4.1.9.9.46.1.3.1.1.4",
"CISCO-VTP-MIB::vtpVlanMtu": "1.3.6.1.4.1.9.9.46.1.3.1.1.5",
"CISCO-VTP-MIB::vtpVlanDot10Said": "1.3.6.1.4.1.9.9.46.1.3.1.1.6",
"CISCO-VTP-MIB::vtpVlanRingNumber": "1.3.6.1.4.1.9.9.46.1.3.1.1.7",
"CISCO-VTP-MIB::vtpVlanBridgeNumber": "1.3.6.1.4.1.9.9.46.1.3.1.1.8",
"CISCO-VTP-MIB::vtpVlanStpType": "1.3.6.1.4.1.9.9.46.1.3.1.1.9",
"CISCO-VTP-MIB::vtpVlanParentVlan": "1.3.6.1.4.1.9.9.46.1.3.1.1.10",
"CISCO-VTP-MIB::vtpVlanTranslationalVlan1": "1.3.6.1.4.1.9.9.46.1.3.1.1.11",
"CISCO-VTP-MIB::vtpVlanTranslationalVlan2": "1.3.6.1.4.1.9.9.46.1.3.1.1.12",
"CISCO-VTP-MIB::vtpVlanBridgeType": "1.3.6.1.4.1.9.9.46.1.3.1.1.13",
"CISCO-VTP-MIB::vtpVlanAreHopCount": "1.3.6.1.4.1.9.9.46.1.3.1.1.14",
"CISCO-VTP-MIB::vtpVlanSteHopCount": "1.3.6.1.4.1.9.9.46.1.3.1.1.15",
"CISCO-VTP-MIB::vtpVlanIsCRFBackup": "1.3.6.1.4.1.9.9.46.1.3.1.1.16",
"CISCO-VTP-MIB::vtpVlanTypeExt": "1.3.6.1.4.1.9.9.46.1.3.1.1.17",
"CISCO-VTP-MIB::vtpVlanIfIndex": "1.3.6.1.4.1.9.9.46.1.3.1.1.18",
"CISCO-VTP-MIB::internalVlanInfo": "1.3.6.1.4.1.9.9.46.1.3.2",
"CISCO-VTP-MIB::vtpInternalVlanAllocPolicy": "1.3.6.1.4.1.9.9.46.1.3.2.1",
"CISCO-VTP-MIB::vtpInternalVlanTable": "1.3.6.1.4.1.9.9.46.1.3.2.2",
"CISCO-VTP-MIB::vtpInternalVlanEntry": "1.3.6.1.4.1.9.9.46.1.3.2.2.1",
"CISCO-VTP-MIB::vtpInternalVlanOwner": "1.3.6.1.4.1.9.9.46.1.3.2.2.1.1",
"CISCO-VTP-MIB::vlanEdit": "1.3.6.1.4.1.9.9.46.1.4",
"CISCO-VTP-MIB::vtpEditControlTable": "1.3.6.1.4.1.9.9.46.1.4.1",
"CISCO-VTP-MIB::vtpEditControlEntry": "1.3.6.1.4.1.9.9.46.1.4.1.1",
"CISCO-VTP-MIB::vtpVlanEditOperation": "1.3.6.1.4.1.9.9.46.1.4.1.1.1",
"CISCO-VTP-MIB::vtpVlanApplyStatus": "1.3.6.1.4.1.9.9.46.1.4.1.1.2",
"CISCO-VTP-MIB::vtpVlanEditBufferOwner": "1.3.6.1.4.1.9.9.46.1.4.1.1.3",
"CISCO-VTP-MIB::vtpVlanEditConfigRevNumber": "1.3.6.1.4.1.9.9.46.1.4.1.1.4",
"CISCO-VTP-MIB::vtpVlanEditModifiedVlan": "1.3.6.1.4.1.9.9.46.1.4.1.1.5",
"CISCO-VTP-MIB::vtpVlanEditTable": "1.3.6.1.4.1.9.9.46.1.4.2",
"CISCO-VTP-MIB::vtpVlanEditEntry": "1.3.6.1.4.1.9.9.46.1.4.2.1",
"CISCO-VTP-MIB::vtpVlanEditIndex": "1.3.6.1.4.1.9.9.46.1.4.2.1.1",
"CISCO-VTP-MIB::vtpVlanEditState": "1.3.6.1.4.1.9.9.46.1.4.2.1.2",
"CISCO-VTP-MIB::vtpVlanEditType": "1.3.6.1.4.1.9.9.46.1.4.2.1.3",
"CISCO-VTP-MIB::vtpVlanEditName": "1.3.6.1.4.1.9.9.46.1.4.2.1.4",
"CISCO-VTP-MIB::vtpVlanEditMtu": "1.3.6.1.4.1.9.9.46.1.4.2.1.5",
"CISCO-VTP-MIB::vtpVlanEditDot10Said": "1.3.6.1.4.1.9.9.46.1.4.2.1.6",
"CISCO-VTP-MIB::vtpVlanEditRingNumber": "1.3.6.1.4.1.9.9.46.1.4.2.1.7",
"CISCO-VTP-MIB::vtpVlanEditBridgeNumber": "1.3.6.1.4.1.9.9.46.1.4.2.1.8",
"CISCO-VTP-MIB::vtpVlanEditStpType": "1.3.6.1.4.1.9.9.46.1.4.2.1.9",
"CISCO-VTP-MIB::vtpVlanEditParentVlan": "1.3.6.1.4.1.9.9.46.1.4.2.1.10",
"CISCO-VTP-MIB::vtpVlanEditRowStatus": "1.3.6.1.4.1.9.9.46.1.4.2.1.11",
"CISCO-VTP-MIB::vtpVlanEditTranslationalVlan1": "1.3.6.1.4.1.9.9.46.1.4.2.1.12",
"CISCO-VTP-MIB::vtpVlanEditTranslationalVlan2": "1.3.6.1.4.1.9.9.46.1.4.2.1.13",
"CISCO-VTP-MIB::vtpVlanEditBridgeType": "1.3.6.1.4.1.9.9.46.1.4.2.1.14",
"CISCO-VTP-MIB::vtpVlanEditAreHopCount": "1.3.6.1.4.1.9.9.46.1.4.2.1.15",
"CISCO-VTP-MIB::vtpVlanEditSteHopCount": "1.3.6.1.4.1.9.9.46.1.4.2.1.16",
"CISCO-VTP-MIB::vtpVlanEditIsCRFBackup": "1.3.6.1.4.1.9.9.46.1.4.2.1.17",
"CISCO-VTP-MIB::vtpVlanEditTypeExt": "1.3.6.1.4.1.9.9.46.1.4.2.1.18",
"CISCO-VTP-MIB::vtpVlanEditTypeExt2": "1.3.6.1.4.1.9.9.46.1.4.2.1.19",
"CISCO-VTP-MIB::vtpStats": "1.3.6.1.4.1.9.9.46.1.5",
"CISCO-VTP-MIB::vtpStatsTable": "1.3.6.1.4.1.9.9.46.1.5.1",
"CISCO-VTP-MIB::vtpStatsEntry": "1.3.6.1.4.1.9.9.46.1.5.1.1",
"CISCO-VTP-MIB::vtpInSummaryAdverts": "1.3.6.1.4.1.9.9.46.1.5.1.1.1",
"CISCO-VTP-MIB::vtpInSubsetAdverts": "1.3.6.1.4.1.9.9.46.1.5.1.1.2",
"CISCO-VTP-MIB::vtpInAdvertRequests": "1.3.6.1.4.1.9.9.46.1.5.1.1.3",
"CISCO-VTP-MIB::vtpOutSummaryAdverts": "1.3.6.1.4.1.9.9.46.1.5.1.1.4",
"CISCO-VTP-MIB::vtpOutSubsetAdverts": "1.3.6.1.4.1.9.9.46.1.5.1.1.5",
"CISCO-VTP-MIB::vtpOutAdvertRequests": "1.3.6.1.4.1.9.9.46.1.5.1.1.6",
"CISCO-VTP-MIB::vtpConfigRevNumberErrors": "1.3.6.1.4.1.9.9.46.1.5.1.1.7",
"CISCO-VTP-MIB::vtpConfigDigestErrors": "1.3.6.1.4.1.9.9.46.1.5.1.1.8",
"CISCO-VTP-MIB::vlanTrunkPorts": "1.3.6.1.4.1.9.9.46.1.6",
"CISCO-VTP-MIB::vlanTrunkPortTable": "1.3.6.1.4.1.9.9.46.1.6.1",
"CISCO-VTP-MIB::vlanTrunkPortEntry": "1.3.6.1.4.1.9.9.46.1.6.1.1",
"CISCO-VTP-MIB::vlanTrunkPortIfIndex": "1.3.6.1.4.1.9.9.46.1.6.1.1.1",
"CISCO-VTP-MIB::vlanTrunkPortManagementDomain": "1.3.6.1.4.1.9.9.46.1.6.1.1.2",
"CISCO-VTP-MIB::vlanTrunkPortEncapsulationType": "1.3.6.1.4.1.9.9.46.1.6.1.1.3",
"CISCO-VTP-MIB::vlanTrunkPortVlansEnabled": "1.3.6.1.4.1.9.9.46.1.6.1.1.4",
"CISCO-VTP-MIB::vlanTrunkPortNativeVlan": "1.3.6.1.4.1.9.9.46.1.6.1.1.5",
"CISCO-VTP-MIB::vlanTrunkPortRowStatus": "1.3.6.1.4.1.9.9.46.1.6.1.1.6",
"CISCO-VTP-MIB::vlanTrunkPortInJoins": "1.3.6.1.4.1.9.9.46.1.6.1.1.7",
"CISCO-VTP-MIB::vlanTrunkPortOutJoins": "1.3.6.1.4.1.9.9.46.1.6.1.1.8",
"CISCO-VTP-MIB::vlanTrunkPortOldAdverts": "1.3.6.1.4.1.9.9.46.1.6.1.1.9",
"CISCO-VTP-MIB::vlanTrunkPortVlansPruningEligible": "1.3.6.1.4.1.9.9.46.1.6.1.1.10",
"CISCO-VTP-MIB::vlanTrunkPortVlansXmitJoined": "1.3.6.1.4.1.9.9.46.1.6.1.1.11",
"CISCO-VTP-MIB::vlanTrunkPortVlansRcvJoined": "1.3.6.1.4.1.9.9.46.1.6.1.1.12",
"CISCO-VTP-MIB::vlanTrunkPortDynamicState": "1.3.6.1.4.1.9.9.46.1.6.1.1.13",
"CISCO-VTP-MIB::vlanTrunkPortDynamicStatus": "1.3.6.1.4.1.9.9.46.1.6.1.1.14",
"CISCO-VTP-MIB::vlanTrunkPortVtpEnabled": "1.3.6.1.4.1.9.9.46.1.6.1.1.15",
"CISCO-VTP-MIB::vlanTrunkPortEncapsulationOperType": "1.3.6.1.4.1.9.9.46.1.6.1.1.16",
"CISCO-VTP-MIB::vlanTrunkPortVlansEnabled2k": "1.3.6.1.4.1.9.9.46.1.6.1.1.17",
"CISCO-VTP-MIB::vlanTrunkPortVlansEnabled3k": "1.3.6.1.4.1.9.9.46.1.6.1.1.18",
"CISCO-VTP-MIB::vlanTrunkPortVlansEnabled4k": "1.3.6.1.4.1.9.9.46.1.6.1.1.19",
"CISCO-VTP-MIB::vtpVlansPruningEligible2k": "1.3.6.1.4.1.9.9.46.1.6.1.1.20",
"CISCO-VTP-MIB::vtpVlansPruningEligible3k": "1.3.6.1.4.1.9.9.46.1.6.1.1.21",
"CISCO-VTP-MIB::vtpVlansPruningEligible4k": "1.3.6.1.4.1.9.9.46.1.6.1.1.22",
"CISCO-VTP-MIB::vlanTrunkPortVlansXmitJoined2k": "1.3.6.1.4.1.9.9.46.1.6.1.1.23",
"CISCO-VTP-MIB::vlanTrunkPortVlansXmitJoined3k": "1.3.6.1.4.1.9.9.46.1.6.1.1.24",
"CISCO-VTP-MIB::vlanTrunkPortVlansXmitJoined4k": "1.3.6.1.4.1.9.9.46.1.6.1.1.25",
"CISCO-VTP-MIB::vlanTrunkPortVlansRcvJoined2k": "1.3.6.1.4.1.9.9.46.1.6.1.1.26",
"CISCO-VTP-MIB::vlanTrunkPortVlansRcvJoined3k": "1.3.6.1.4.1.9.9.46.1.6.1.1.27",
"CISCO-VTP-MIB::vlanTrunkPortVlansRcvJoined4k": "1.3.6.1.4.1.9.9.46.1.6.1.1.28",
"CISCO-VTP-MIB::vlanTrunkPortDot1qTunnel": "1.3.6.1.4.1.9.9.46.1.6.1.1.29",
"CISCO-VTP-MIB::vlanTrunkPortVlansActiveFirst2k": "1.3.6.1.4.1.9.9.46.1.6.1.1.30",
"CISCO-VTP-MIB::vlanTrunkPortVlansActiveSecond2k": "1.3.6.1.4.1.9.9.46.1.6.1.1.31",
"CISCO-VTP-MIB::vlanTrunkPortSetSerialNo": "1.3.6.1.4.1.9.9.46.1.6.2",
"CISCO-VTP-MIB::vlanTrunkPortsDot1qTag": "1.3.6.1.4.1.9.9.46.1.6.3",
"CISCO-VTP-MIB::vtpDiscover": "1.3.6.1.4.1.9.9.46.1.7",
"CISCO-VTP-MIB::vtpDiscoverTable": "1.3.6.1.4.1.9.9.46.1.7.1",
"CISCO-VTP-MIB::vtpDiscoverEntry": "1.3.6.1.4.1.9.9.46.1.7.1.1",
"CISCO-VTP-MIB::vtpDiscoverAction": "1.3.6.1.4.1.9.9.46.1.7.1.1.1",
"CISCO-VTP-MIB::vtpDiscoverStatus": "1.3.6.1.4.1.9.9.46.1.7.1.1.2",
"CISCO-VTP-MIB::vtpLastDiscoverTime": "1.3.6.1.4.1.9.9.46.1.7.1.1.3",
"CISCO-VTP-MIB::vtpDiscoverResultTable": "1.3.6.1.4.1.9.9.46.1.7.2",
"CISCO-VTP-MIB::vtpDiscoverResultEntry": "1.3.6.1.4.1.9.9.46.1.7.2.1",
"CISCO-VTP-MIB::vtpDiscoverResultIndex": "1.3.6.1.4.1.9.9.46.1.7.2.1.1",
"CISCO-VTP-MIB::vtpDiscoverResultDatabaseName": "1.3.6.1.4.1.9.9.46.1.7.2.1.2",
"CISCO-VTP-MIB::vtpDiscoverResultConflicting": "1.3.6.1.4.1.9.9.46.1.7.2.1.3",
"CISCO-VTP-MIB::vtpDiscoverResultDeviceId": "1.3.6.1.4.1.9.9.46.1.7.2.1.4",
"CISCO-VTP-MIB::vtpDiscoverResultPrimaryServer": "1.3.6.1.4.1.9.9.46.1.7.2.1.5",
"CISCO-VTP-MIB::vtpDiscoverResultRevNumber": "1.3.6.1.4.1.9.9.46.1.7.2.1.6",
"CISCO-VTP-MIB::vtpDiscoverResultSystemName": "1.3.6.1.4.1.9.9.46.1.7.2.1.7",
"CISCO-VTP-MIB::vtpDatabase": "1.3.6.1.4.1.9.9.46.1.8",
"CISCO-VTP-MIB::vtpDatabaseTable": "1.3.6.1.4.1.9.9.46.1.8.1",
"CISCO-VTP-MIB::vtpDatabaseEntry": "1.3.6.1.4.1.9.9.46.1.8.1.1",
"CISCO-VTP-MIB::vtpDatabaseIndex": "1.3.6.1.4.1.9.9.46.1.8.1.1.1",
"CISCO-VTP-MIB::vtpDatabaseName": "1.3.6.1.4.1.9.9.46.1.8.1.1.2",
"CISCO-VTP-MIB::vtpDatabaseLocalMode": "1.3.6.1.4.1.9.9.46.1.8.1.1.3",
"CISCO-VTP-MIB::vtpDatabaseRevNumber": "1.3.6.1.4.1.9.9.46.1.8.1.1.4",
"CISCO-VTP-MIB::vtpDatabasePrimaryServer": "1.3.6.1.4.1.9.9.46.1.8.1.1.5",
"CISCO-VTP-MIB::vtpDatabasePrimaryServerId": "1.3.6.1.4.1.9.9.46.1.8.1.1.6",
"CISCO-VTP-MIB::vtpDatabaseTakeOverPrimary": "1.3.6.1.4.1.9.9.46.1.8.1.1.7",
"CISCO-VTP-MIB::vtpDatabaseTakeOverPassword": "1.3.6.1.4.1.9.9.46.1.8.1.1.8",
"CISCO-VTP-MIB::vtpAuthentication": "1.3.6.1.4.1.9.9.46.1.9",
"CISCO-VTP-MIB::vtpAuthenticationTable": "1.3.6.1.4.1.9.9.46.1.9.1",
"CISCO-VTP-MIB::vtpAuthEntry": "1.3.6.1.4.1.9.9.46.1.9.1.1",
"CISCO-VTP-MIB::vtpAuthPassword": "1.3.6.1.4.1.9.9.46.1.9.1.1.1",
"CISCO-VTP-MIB::vtpAuthPasswordType": "1.3.6.1.4.1.9.9.46.1.9.1.1.2",
"CISCO-VTP-MIB::vtpAuthSecretKey": "1.3.6.1.4.1.9.9.46.1.9.1.1.3",
"CISCO-VTP-MIB::vlanStatistics": "1.3.6.1.4.1.9.9.46.1.10",
"CISCO-VTP-MIB::vlanStatsVlans": "1.3.6.1.4.1.9.9.46.1.10.1",
"CISCO-VTP-MIB::vlanStatsExtendedVlans": "1.3.6.1.4.1.9.9.46.1.10.2",
"CISCO-VTP-MIB::vlanStatsInternalVlans": "1.3.6.1.4.1.9.9.46.1.10.3",
"CISCO-VTP-MIB::vlanStatsFreeVlans": "1.3.6.1.4.1.9.9.46.1.10.4",
"CISCO-VTP-MIB::vtpNotifications": "1.3.6.1.4.1.9.9.46.2",
"CISCO-VTP-MIB::vtpNotificationsPrefix": "1.3.6.1.4.1.9.9.46.2.0",
"CISCO-VTP-MIB::vtpConfigRevNumberError": "1.3.6.1.4.1.9.9.46.2.0.1",
"CISCO-VTP-MIB::vtpConfigDigestError": "1.3.6.1.4.1.9.9.46.2.0.2",
"CISCO-VTP-MIB::vtpServerDisabled": "1.3.6.1.4.1.9.9.46.2.0.3",
"CISCO-VTP-MIB::vtpMtuTooBig": "1.3.6.1.4.1.9.9.46.2.0.4",
"CISCO-VTP-MIB::vtpVersionOneDeviceDetected": "1.3.6.1.4.1.9.9.46.2.0.6",
"CISCO-VTP-MIB::vlanTrunkPortDynamicStatusChange": "1.3.6.1.4.1.9.9.46.2.0.7",
"CISCO-VTP-MIB::vtpLocalModeChanged": "1.3.6.1.4.1.9.9.46.2.0.8",
"CISCO-VTP-MIB::vtpVersionInUseChanged": "1.3.6.1.4.1.9.9.46.2.0.9",
"CISCO-VTP-MIB::vtpVlanCreated": "1.3.6.1.4.1.9.9.46.2.0.10",
"CISCO-VTP-MIB::vtpVlanDeleted": "1.3.6.1.4.1.9.9.46.2.0.11",
"CISCO-VTP-MIB::vtpVlanRingNumberConflict": "1.3.6.1.4.1.9.9.46.2.0.12",
"CISCO-VTP-MIB::vtpPruningStateOperChange": "1.3.6.1.4.1.9.9.46.2.0.13",
"CISCO-VTP-MIB::vtpNotificationsObjects": "1.3.6.1.4.1.9.9.46.2.1",
"CISCO-VTP-MIB::vtpVlanPortLocalSegment": "1.3.6.1.4.1.9.9.46.2.1.1",
"CISCO-VTP-MIB::vtpMIBConformance": "1.3.6.1.4.1.9.9.46.3",
"CISCO-VTP-MIB::vtpMIBCompliances": "1.3.6.1.4.1.9.9.46.3.1",
"CISCO-VTP-MIB::vtpMIBGroups": "1.3.6.1.4.1.9.9.46.3.2"
}
| [
"aversanta@gmail.com"
] | aversanta@gmail.com |
3c913a3354033d6adee53f32448dc623a7fb194f | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /Es985FEDzEQ2tkM75_17.py | e8efaf329556cd3f9c55aca68288ccd6ac325fca | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py |
def caesar_cipher(txt, key):
    """Return txt with each ASCII letter shifted forward by key positions.

    Case is preserved and non-alphabetic characters pass through unchanged.
    Python's % always yields a non-negative result, so negative keys wrap
    correctly as well.
    """
    shifted = []
    for ch in txt:
        if ch.isupper():
            shifted.append(chr(65 + (ord(ch) - 65 + key) % 26))
        elif ch.islower():
            shifted.append(chr(97 + (ord(ch) - 97 + key) % 26))
        else:
            shifted.append(ch)
    return ''.join(shifted)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
cf338dd57c610af022867d3532ff1953b7647c27 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_221/ch9_2020_03_02_13_41_48_396124.py | 09f64ff03facaaf471f3008dd46004ad7227e75c | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85 | py | import math
def calcula_volume_da_esfera(R):
    """Return the volume of a sphere of radius R (V = 4/3 * pi * R**3).

    Bug fix: the original `def` line was missing both the parentheses and
    the radius parameter (`def calcula_volume_da_esfera`), which is a
    SyntaxError, and `R` was otherwise undefined in the body.
    """
    V = (4/3) * math.pi * (R**3)
    return V
| [
"you@example.com"
] | you@example.com |
# Google Code Jam style solution, read from stdin.
# Each horse starts at position horse[j][0] and needs horse[j][1] seconds
# per unit of distance; the answer is the constant speed that reaches the
# destination d exactly when the slowest horse does.
t = int (input() )  # number of test cases
for i in range(t):
    d,n = map(int,input().split() )  # destination and number of horses
    horse=[]
    for j in range(n):
        temp1,temp2= map(int,input().split() )  # start position, seconds per km
        horse.append([temp1,temp2])
    ans=0
    for j in range(n):
        # Time for horse j to cover the remaining distance to d.
        need = (d-horse[j][0])/horse[j][1]
        if(need > ans):
            ans=need  # keep the slowest (largest) arrival time
    # Speed that arrives at d at exactly the slowest horse's time.
    print("Case #"+str(i+1)+": "+"{:.12f}".format(d/ans) )
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
f9581969a8bddb9173474df4eed8e5a281400c07 | f3c0568f0c7ba9d34b2ec3dc9cdce167164d3e45 | /sms-spam.py | 36bd778a626fe33e21486c575d293da6c2307f8e | [] | no_license | renauddahou/sms-spam | f1313e2ea3b1136e2d38e23111981e17f8010583 | 949bd1de4682a2abbf68d42b5fcfc2d25f29d6a5 | refs/heads/main | 2023-04-15T04:31:21.463825 | 2021-04-29T10:56:24 | 2021-04-29T10:56:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,790 | py | import requests
import time
import colorama
import os
import threading
#import socks
#import socket
import urllib.request
import zipfile
import random
import datetime
import sys
import re
import json
from colorama import Fore, Back
from threading import Thread
from random import randint
import asyncio
from requests import get
logo = ("""
##### ### ##### # ###### # ##### ##### ### #
# # # # # # # # # # # # # # # # # ##
# # # # # # # # # # # # # # #
# # # # # # # # # ##### ##### # # #
# # # ####### # # ####### # # # # #
# # # # # # # # # # # # # # # # # #
##### ### ##### # # ###### # # ##### ##### ### #####""")
print(logo)
ip = get('https://api.ipify.org').text
print('\n\n Ваш IP address is: {}'.format(ip))
colorama.init()
thr = 1
thr = int(input("\n\n Введите количество потоков: "))
def mask(str, maska):
if len(str) == maska.count('#'):
str_list = list(str)
for i in str_list:
maska=maska.replace("#", i, 1)
return maska0
print ('\n \n Введите номер жертвы: (3XXXXXXXXX)')
phone = input ('\n\n >>> ')
def sms(counet):
if len(phone) == 11 or len(phone) == 12 or len(phone) == 13:
pass
phone9 = phone[1:]
else:
print ("[!] Неправильный номер.")
sms()
while 1>0:
try:
requests.post("https://youla.ru/web-api/auth/request_code", data={"phone": phone})
print('sms отправлено')
except:
pass
try:
requests.post("https://eda.yandex/api/v1/user/request_authentication_code", json={"phone_number": "+"+phone})
print('sms отправлено')
except:
pass
break
pass
for i in range(thr):
t = threading.Thread(target= sms, args=(i, ), )
try:
t.start()
print(f"Поток {i} запущен!")
time.sleep(3)
except Exception as e:
print(f"Ошибка <{e}> поток `{i}`")
sms()
colorama.init()
logo = '''
##### ### ##### # ###### # ##### ##### ### #
# # # # # # # # # # # # # # # # # ##
# # # # # # # # # # # # # # #
# # # # # # # # # ##### ##### # # #
# # # ####### # # ####### # # # # #
# # # # # # # # # # # # # # # # # #
##### ### ##### # # ###### # # ##### ##### ### #####'''
input("\n Для выхода нажми Enter")
sys.exit()
sms()
| [
"noreply@github.com"
] | renauddahou.noreply@github.com |
8531d5d6b9c1ce71600a47c0591f26494c47860b | e22c17c4f6b83a48a5bbe75bc35ad2132d93ebce | /opl/migrations/0008_auto_20200305_1559.py | 42b9942ce85a1c8cd5a98bd086312d86a9ec9375 | [] | no_license | MATT143/Snippets | f568b4117f2fe097ea5611e0bab764c4e13bb724 | 17a816b926a3ec5e9658739801d6bf3095b0128a | refs/heads/master | 2022-04-23T01:40:50.447026 | 2020-04-18T04:09:25 | 2020-04-18T04:09:25 | 256,666,806 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 576 | py | # Generated by Django 2.2.11 on 2020-03-05 10:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('opl', '0007_auto_20200305_1302'),
]
operations = [
migrations.AddField(
model_name='oplorderdetails',
name='subscriptionId',
field=models.CharField(default=None, max_length=20),
),
migrations.AlterField(
model_name='oplorderdetails',
name='subRefId',
field=models.CharField(max_length=20),
),
]
| [
"mnahak@cisco.com"
] | mnahak@cisco.com |
a275f1663018082cb7d363a7f67a0cfe839189c9 | 6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386 | /google/firestore/admin/v1/firestore-admin-v1-py/docs/conf.py | 3513da45e7d36bd65dd15c3210f92557a28b990f | [
"Apache-2.0"
] | permissive | oltoco/googleapis-gen | bf40cfad61b4217aca07068bd4922a86e3bbd2d5 | 00ca50bdde80906d6f62314ef4f7630b8cdb6e15 | refs/heads/master | 2023-07-17T22:11:47.848185 | 2021-08-29T20:39:47 | 2021-08-29T20:39:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,557 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# google-cloud-firestore-admin documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
__version__ = "0.1.0"
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.6.3"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
]
# autodoc/autosummary flags
autoclass_content = "both"
autodoc_default_flags = ["members"]
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# Allow markdown includes (so releases.md can include CHANGLEOG.md)
# http://www.sphinx-doc.org/en/master/markdown.html
source_parsers = {".md": "recommonmark.parser.CommonMarkParser"}
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"google-cloud-firestore-admin"
copyright = u"2020, Google, LLC"
author = u"Google APIs" # TODO: autogenerate this bit
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __version__
# The short X.Y version.
version = ".".join(release.split(".")[0:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"description": "Google Cloud Client Libraries for Python",
"github_user": "googleapis",
"github_repo": "google-cloud-python",
"github_banner": True,
"font_family": "'Roboto', Georgia, sans",
"head_font_family": "'Roboto', Georgia, serif",
"code_font_family": "'Roboto Mono', 'Consolas', monospace",
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "google-cloud-firestore-admin-doc"
# -- Options for warnings ------------------------------------------------------
suppress_warnings = [
# Temporarily suppress this to avoid "more than one target found for
# cross-reference" warning, which are intractable for us to avoid while in
# a mono-repo.
# See https://github.com/sphinx-doc/sphinx/blob
# /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
"ref.python"
]
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"google-cloud-firestore-admin.tex",
u"google-cloud-firestore-admin Documentation",
author,
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
master_doc,
"google-cloud-firestore-admin",
u"Google Cloud Firestore Admin Documentation",
[author],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"google-cloud-firestore-admin",
u"google-cloud-firestore-admin Documentation",
author,
"google-cloud-firestore-admin",
"GAPIC library for Google Cloud Firestore Admin API",
"APIs",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("http://python.readthedocs.org/en/latest/", None),
"gax": ("https://gax-python.readthedocs.org/en/latest/", None),
"google-auth": ("https://google-auth.readthedocs.io/en/stable", None),
"google-gax": ("https://gax-python.readthedocs.io/en/latest/", None),
"google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None),
"grpc": ("https://grpc.io/grpc/python/", None),
"requests": ("http://requests.kennethreitz.org/en/stable/", None),
"proto": ("https://proto-plus-python.readthedocs.io/en/stable", None),
"protobuf": ("https://googleapis.dev/python/protobuf/latest/", None),
}
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
6415c261c52b6628e5b7cacc8a70924cc04f753a | 53ed8b8d650439ba9aac764f5de5d96b67cbd77a | /867. Transpose Matrix.py | b2f1e40fd1c10e63a14c2f6c0f60df89da6dad8c | [] | no_license | IrwinLai/LeetCode | df49d152b4bf439c966afa53eecfe3022fb043ae | 779c3a98d9052a12d319c0219324e5d0f5517fc6 | refs/heads/master | 2021-07-03T08:02:55.425285 | 2021-03-21T11:19:46 | 2021-03-21T11:19:46 | 221,848,796 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | class Solution:
def transpose(self, A: List[List[int]]) -> List[List[int]]:
    """Return the transpose of the rectangular matrix *A*.

    Args:
        A: matrix given as a list of equal-length rows.

    Returns:
        A new matrix whose row i is column i of *A*.
    """
    # zip(*A) iterates the columns of A as tuples; materialise each as a
    # list.  Unlike the index-based version this also handles A == []
    # (returns []) instead of raising IndexError on A[0].
    return [list(col) for col in zip(*A)]
"noreply@github.com"
] | IrwinLai.noreply@github.com |
f1744beb2febf20b8a713a115b337d00d30f9ac4 | b1b77bb1ed47586f96d8f2554a65bcbd0c7162cc | /SPOTIFY/luigi/test/task_test.py | 80e08c956da8480bac9efee1efecb013d1ca667e | [] | no_license | DanHefrman/stuff | b3624d7089909972ee806211666374a261c02d08 | b98a5c80cfe7041d8908dcfd4230cf065c17f3f6 | refs/heads/master | 2023-07-10T09:47:04.780112 | 2021-08-13T09:55:17 | 2021-08-13T09:55:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,347 | py | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import doctest
import pickle
import warnings
from helpers import unittest, LuigiTestCase
from datetime import datetime, timedelta
import luigi
import luigi.task
import luigi.util
import collections
from luigi.task_register import load_task
class DummyTask(luigi.Task):
    """Fixture task declaring one parameter of each common luigi type."""
    param = luigi.Parameter()
    bool_param = luigi.BoolParameter()
    int_param = luigi.IntParameter()
    float_param = luigi.FloatParameter()
    date_param = luigi.DateParameter()
    datehour_param = luigi.DateHourParameter()
    timedelta_param = luigi.TimeDeltaParameter()
    # significant=False: excluded from the task's identity (task_id).
    insignificant_param = luigi.Parameter(significant=False)
# A complete, type-correct argument set for DummyTask, reused by the
# round-trip and parameter-type-warning tests below.
DUMMY_TASK_OK_PARAMS = dict(
    param='test',
    bool_param=True,
    int_param=666,
    float_param=123.456,
    date_param=datetime(2014, 9, 13).date(),
    datehour_param=datetime(2014, 9, 13, 9),
    timedelta_param=timedelta(44),  # doesn't support seconds
    insignificant_param='test')
class DefaultInsignificantParamTask(luigi.Task):
    """Fixture with one defaulted and one required insignificant parameter."""
    insignificant_param = luigi.Parameter(significant=False, default='value')
    necessary_param = luigi.Parameter(significant=False)
class TaskTest(unittest.TestCase):
    """Unit tests for core ``luigi.Task`` behaviour (serialisation,
    getpaths/flatten helpers, pickling, and parameter-type warnings)."""

    def test_tasks_doctest(self):
        # Execute the doctests embedded in the luigi.task module itself.
        doctest.testmod(luigi.task)

    def test_task_to_str_to_task(self):
        # to_str_params()/from_str_params() must round-trip to an equal task.
        original = DummyTask(**DUMMY_TASK_OK_PARAMS)
        other = DummyTask.from_str_params(original.to_str_params())
        self.assertEqual(original, other)

    def test_task_from_str_insignificant(self):
        # A defaulted insignificant param may be omitted from the str params.
        params = {'necessary_param': 'needed'}
        original = DefaultInsignificantParamTask(**params)
        other = DefaultInsignificantParamTask.from_str_params(params)
        self.assertEqual(original, other)

    def test_task_missing_necessary_param(self):
        # Omitting a parameter without a default raises.
        with self.assertRaises(luigi.parameter.MissingParameterException):
            DefaultInsignificantParamTask.from_str_params({})

    def test_external_tasks_loadable(self):
        # load_task resolves a task class by module + name.
        task = load_task("luigi", "ExternalTask", {})
        assert(isinstance(task, luigi.ExternalTask))

    def test_getpaths(self):
        # getpaths must map tasks to their output targets while preserving
        # the container structure (dict/OrderedDict/list/tuple/generator).
        class RequiredTask(luigi.Task):
            def output(self):
                return luigi.LocalTarget("/path/to/target/file")

        t = RequiredTask()
        reqs = {}
        reqs["bare"] = t
        reqs["dict"] = {"key": t}
        reqs["OrderedDict"] = collections.OrderedDict([("key", t)])
        reqs["list"] = [t]
        reqs["tuple"] = (t,)
        reqs["generator"] = (t for _ in range(10))

        struct = luigi.task.getpaths(reqs)
        self.assertIsInstance(struct, dict)
        self.assertIsInstance(struct["bare"], luigi.Target)
        self.assertIsInstance(struct["dict"], dict)
        self.assertIsInstance(struct["OrderedDict"], collections.OrderedDict)
        self.assertIsInstance(struct["list"], list)
        self.assertIsInstance(struct["tuple"], tuple)
        self.assertTrue(hasattr(struct["generator"], "__iter__"))

    def test_flatten(self):
        # flatten turns nested containers/scalars into a flat list; a None
        # inside a generator is a TypeError.
        flatten = luigi.task.flatten
        self.assertEqual(sorted(flatten({'a': 'foo', 'b': 'bar'})), ['bar', 'foo'])
        self.assertEqual(sorted(flatten(['foo', ['bar', 'troll']])), ['bar', 'foo', 'troll'])
        self.assertEqual(flatten('foo'), ['foo'])
        self.assertEqual(flatten(42), [42])
        self.assertEqual(flatten((len(i) for i in ["foo", "troll"])), [3, 5])
        self.assertRaises(TypeError, flatten, (len(i) for i in ["foo", "troll", None]))

    def test_externalized_task_picklable(self):
        # An externalized task instance must survive a pickle round-trip.
        task = luigi.task.externalize(luigi.Task())
        pickled_task = pickle.dumps(task)
        self.assertEqual(task, pickle.loads(pickled_task))

    def test_no_unpicklable_properties(self):
        # The context manager temporarily strips unpicklable callables and
        # restores them (still functional) afterwards.
        task = luigi.Task()
        task.set_tracking_url = lambda tracking_url: tracking_url
        task.set_status_message = lambda message: message
        with task.no_unpicklable_properties():
            pickle.dumps(task)
        self.assertIsNotNone(task.set_tracking_url)
        self.assertIsNotNone(task.set_status_message)
        tracking_url = task.set_tracking_url('http://test.luigi.com/')
        self.assertEqual(tracking_url, 'http://test.luigi.com/')
        message = task.set_status_message('message')
        self.assertEqual(message, 'message')

    def test_no_warn_if_param_types_ok(self):
        with warnings.catch_warnings(record=True) as w:
            DummyTask(**DUMMY_TASK_OK_PARAMS)
        self.assertEqual(len(w), 0, msg='No warning should be raised when correct parameter types are used')

    def test_warn_on_non_str_param(self):
        # Passing an int where a string Parameter is expected warns.
        params = dict(**DUMMY_TASK_OK_PARAMS)
        params['param'] = 42
        with self.assertWarnsRegex(UserWarning, 'Parameter "param" with value "42" is not of type string.'):
            DummyTask(**params)

    def test_warn_on_non_timedelta_param(self):
        # A duck-typed object with days/seconds is still not a timedelta.
        params = dict(**DUMMY_TASK_OK_PARAMS)

        class MockTimedelta:
            days = 1
            seconds = 1

        params['timedelta_param'] = MockTimedelta()
        with self.assertWarnsRegex(UserWarning, 'Parameter "timedelta_param" with value ".*" is not of type timedelta.'):
            DummyTask(**params)
class ExternalizeTaskTest(LuigiTestCase):
    """Tests for ``luigi.task.externalize`` on both classes and instances.

    externalize() must return a run-less copy, never mutate the original
    task class, keep task_id stable, and leave the task registry intact.
    """

    def test_externalize_taskclass(self):
        class MyTask(luigi.Task):
            def run(self):
                pass

        self.assertIsNotNone(MyTask.run)  # Assert what we believe
        task_object = luigi.task.externalize(MyTask)()
        self.assertIsNone(task_object.run)
        self.assertIsNotNone(MyTask.run)  # Check immutability
        self.assertIsNotNone(MyTask().run)  # Check immutability

    def test_externalize_taskobject(self):
        class MyTask(luigi.Task):
            def run(self):
                pass

        task_object = luigi.task.externalize(MyTask())
        self.assertIsNone(task_object.run)
        self.assertIsNotNone(MyTask.run)  # Check immutability
        self.assertIsNotNone(MyTask().run)  # Check immutability

    def test_externalize_taskclass_readable_name(self):
        # The copy is a distinct class but keeps the original name readable.
        class MyTask(luigi.Task):
            def run(self):
                pass

        task_class = luigi.task.externalize(MyTask)
        self.assertIsNot(task_class, MyTask)
        self.assertIn("MyTask", task_class.__name__)

    def test_externalize_taskclass_instance_cache(self):
        # Instance caching must be per-class: the externalized class caches
        # its own instances, separate from the original's.
        class MyTask(luigi.Task):
            def run(self):
                pass

        task_class = luigi.task.externalize(MyTask)
        self.assertIsNot(task_class, MyTask)
        self.assertIs(MyTask(), MyTask())  # Assert it have enabled the instance caching
        self.assertIsNot(task_class(), MyTask())  # Now, they should not be the same of course

    def test_externalize_same_id(self):
        # Externalizing (class or instance form) must not change task_id.
        class MyTask(luigi.Task):
            def run(self):
                pass

        task_normal = MyTask()
        task_ext_1 = luigi.task.externalize(MyTask)()
        task_ext_2 = luigi.task.externalize(MyTask())
        self.assertEqual(task_normal.task_id, task_ext_1.task_id)
        self.assertEqual(task_normal.task_id, task_ext_2.task_id)

    def test_externalize_same_id_with_task_namespace(self):
        # Dependent on the new behavior from spotify/luigi#1953
        class MyTask(luigi.Task):
            task_namespace = "something.domething"

            def run(self):
                pass

        task_normal = MyTask()
        task_ext_1 = luigi.task.externalize(MyTask())
        task_ext_2 = luigi.task.externalize(MyTask)()
        self.assertEqual(task_normal.task_id, task_ext_1.task_id)
        self.assertEqual(task_normal.task_id, task_ext_2.task_id)
        self.assertEqual(str(task_normal), str(task_ext_1))
        self.assertEqual(str(task_normal), str(task_ext_2))

    def test_externalize_same_id_with_luigi_namespace(self):
        # Dependent on the new behavior from spotify/luigi#1953
        luigi.namespace('lets.externalize')

        class MyTask(luigi.Task):
            def run(self):
                pass
        luigi.namespace()

        task_normal = MyTask()
        task_ext_1 = luigi.task.externalize(MyTask())
        task_ext_2 = luigi.task.externalize(MyTask)()
        self.assertEqual(task_normal.task_id, task_ext_1.task_id)
        self.assertEqual(task_normal.task_id, task_ext_2.task_id)
        self.assertEqual(str(task_normal), str(task_ext_1))
        self.assertEqual(str(task_normal), str(task_ext_2))

    def test_externalize_with_requires(self):
        # externalize() inside a @requires decoration must not mutate the
        # original task class either.
        class MyTask(luigi.Task):
            def run(self):
                pass

        @luigi.util.requires(luigi.task.externalize(MyTask))
        class Requirer(luigi.Task):
            def run(self):
                pass

        self.assertIsNotNone(MyTask.run)  # Check immutability
        self.assertIsNotNone(MyTask().run)  # Check immutability

    def test_externalize_doesnt_affect_the_registry(self):
        class MyTask(luigi.Task):
            pass
        reg_orig = luigi.task_register.Register._get_reg()
        luigi.task.externalize(MyTask)
        reg_afterwards = luigi.task_register.Register._get_reg()
        self.assertEqual(reg_orig, reg_afterwards)

    def test_can_uniquely_command_line_parse(self):
        class MyTask(luigi.Task):
            pass
        # This first check is just an assumption rather than assertion
        self.assertTrue(self.run_locally(['MyTask']))
        luigi.task.externalize(MyTask)
        # Now we check we don't encounter "ambiguous task" issues
        self.assertTrue(self.run_locally(['MyTask']))
        # We do this once again, is there previously was a bug like this.
        luigi.task.externalize(MyTask)
        self.assertTrue(self.run_locally(['MyTask']))
class TaskNamespaceTest(LuigiTestCase):
    """Tests for ``luigi.namespace`` and per-class ``task_namespace``.

    NOTE: luigi.namespace() toggles module-global state, so the order of
    the calls inside each test is significant.
    """

    def setup_tasks(self):
        # Two tasks defined with no namespace active, plus the namespaced
        # helper tuple from go_mynamespace().
        class Foo(luigi.Task):
            pass

        class FooSubclass(Foo):
            pass
        return (Foo, FooSubclass, self.go_mynamespace())

    def go_mynamespace(self):
        # Define tasks while the "mynamespace" namespace is active, with
        # one class overriding it and one inheriting that override.
        luigi.namespace("mynamespace")

        class Foo(luigi.Task):
            p = luigi.IntParameter()

        class Bar(Foo):
            task_namespace = "othernamespace"  # namespace override

        class Baz(Bar):  # inherits namespace for Bar
            pass
        luigi.namespace()
        return collections.namedtuple('mynamespace', 'Foo Bar Baz')(Foo, Bar, Baz)

    def test_vanilla(self):
        # No namespace: task_family is just the class name.
        (Foo, FooSubclass, namespace_test_helper) = self.setup_tasks()
        self.assertEqual(Foo.task_family, "Foo")
        self.assertEqual(str(Foo()), "Foo()")
        self.assertEqual(FooSubclass.task_family, "FooSubclass")
        self.assertEqual(str(FooSubclass()), "FooSubclass()")

    def test_namespace(self):
        # Namespaced tasks prefix task_family; overrides are inherited.
        (Foo, FooSubclass, namespace_test_helper) = self.setup_tasks()
        self.assertEqual(namespace_test_helper.Foo.task_family, "mynamespace.Foo")
        self.assertEqual(str(namespace_test_helper.Foo(1)), "mynamespace.Foo(p=1)")
        self.assertEqual(namespace_test_helper.Bar.task_namespace, "othernamespace")
        self.assertEqual(namespace_test_helper.Bar.task_family, "othernamespace.Bar")
        self.assertEqual(str(namespace_test_helper.Bar(1)), "othernamespace.Bar(p=1)")
        self.assertEqual(namespace_test_helper.Baz.task_namespace, "othernamespace")
        self.assertEqual(namespace_test_helper.Baz.task_family, "othernamespace.Baz")
        self.assertEqual(str(namespace_test_helper.Baz(1)), "othernamespace.Baz(p=1)")

    def test_uses_latest_namespace(self):
        # A subclass picks up the namespace active at *its* definition,
        # not the base class's.
        luigi.namespace('a')

        class _BaseTask(luigi.Task):
            pass
        luigi.namespace('b')

        class _ChildTask(_BaseTask):
            pass
        luigi.namespace()  # Reset everything
        child_task = _ChildTask()
        self.assertEqual(child_task.task_family, 'b._ChildTask')
        self.assertEqual(str(child_task), 'b._ChildTask()')

    def test_with_scope(self):
        # A namespace scoped to this module wins over the global one.
        luigi.namespace('wohoo', scope='task_test')
        luigi.namespace('bleh', scope='')

        class MyTask(luigi.Task):
            pass
        luigi.namespace(scope='task_test')
        luigi.namespace(scope='')
        self.assertEqual(MyTask.get_task_namespace(), 'wohoo')

    def test_with_scope_not_matching(self):
        # A scope that doesn't match this module falls back to the global.
        luigi.namespace('wohoo', scope='incorrect_namespace')
        luigi.namespace('bleh', scope='')

        class MyTask(luigi.Task):
            pass
        luigi.namespace(scope='incorrect_namespace')
        luigi.namespace(scope='')
        self.assertEqual(MyTask.get_task_namespace(), 'bleh')
class AutoNamespaceTest(LuigiTestCase):
    """Tests for ``luigi.auto_namespace`` (namespace = defining module)."""
    # Name of the module these test classes are defined in.
    this_module = 'task_test'

    def test_auto_namespace_global(self):
        # Globally enabled auto-namespacing uses the defining module name.
        luigi.auto_namespace()

        class MyTask(luigi.Task):
            pass
        luigi.namespace()
        self.assertEqual(MyTask.get_task_namespace(), self.this_module)

    def test_auto_namespace_scope(self):
        # A matching scope takes precedence over an explicit global namespace.
        luigi.auto_namespace(scope='task_test')
        luigi.namespace('bleh', scope='')

        class MyTask(luigi.Task):
            pass
        luigi.namespace(scope='task_test')
        luigi.namespace(scope='')
        self.assertEqual(MyTask.get_task_namespace(), self.this_module)

    def test_auto_namespace_not_matching(self):
        # Non-matching scope: the explicit global namespace applies instead.
        luigi.auto_namespace(scope='incorrect_namespace')
        luigi.namespace('bleh', scope='')

        class MyTask(luigi.Task):
            pass
        luigi.namespace(scope='incorrect_namespace')
        luigi.namespace(scope='')
        self.assertEqual(MyTask.get_task_namespace(), 'bleh')

    def test_auto_namespace_not_matching_2(self):
        # Non-matching scope and no global namespace: empty namespace.
        luigi.auto_namespace(scope='incorrect_namespace')

        class MyTask(luigi.Task):
            pass
        luigi.namespace(scope='incorrect_namespace')
        self.assertEqual(MyTask.get_task_namespace(), '')
| [
"bryan.guner@gmail.com"
] | bryan.guner@gmail.com |
294df0914a41007122338f4e5fa1725bdd8373de | ed257bb11c2916f8e849a753911f9cf866687774 | /code/experiment_001.py | a5b859e304fa0846047472a516150bb217036621 | [
"MIT"
] | permissive | forero/BetaSkeleton | 737000f5f0a132a0040b5184c90a803490cbcdd9 | 9714f11904bb9c990285815bd29303c08d6aafac | refs/heads/master | 2020-04-06T03:57:08.299294 | 2014-07-31T20:39:14 | 2014-07-31T20:39:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,228 | py | from subprocess import call
# Python 2 driver script: sweep the full grid of mock catalogues and
# beta-skeleton settings, building one `make` command per combination and
# running it synchronously via the shell.
total_mocks = 27
base_mass = [1.0E13]          # halo mass cut(s), paired with base_f_in below
base_f_in = [1.0]             # galaxy fraction(s), zipped with base_mass
rsd_options = [1,0]           # with / without redshift-space distortions
beta_values = [1,2,5,10,20,30]
min_theta=90.0
max_theta=180.0
min_phi=[0.0,89.0]            # two angular wedges, zipped with max_phi
max_phi=[1.0,90.0]
min_r=0.0
max_r=2000.0
omega_m_values = [0.30]
w_values = [-1.0]
for i_mock in range(total_mocks):
    for rsd in rsd_options:
        for beta in beta_values:
            for i_mass, i_f_in in zip(base_mass, base_f_in):
                for i_min_phi, i_max_phi in zip(min_phi, max_phi):
                    for w in w_values:
                        for omega_m in omega_m_values:
                            # Backslashes inside the string literal continue
                            # it across source lines; OMEGA_L is derived as
                            # 1 - OMEGA_M (flat cosmology).
                            command_all=\
"make -f Makefile %s MOCK_ID=%02d BETA=%d CUT_MASS=%.1E FRAC=%.4f \
RSD=%d MIN_THETA=%.1f MAX_THETA=%.1f MIN_PHI=%.1f MAX_PHI=%.1f MIN_R=%.1f MAX_R=%.1f \
OMEGA_M=%.2f OMEGA_L=%.2f W=%.1f" \
%("all", i_mock, beta, i_mass, i_f_in, rsd, min_theta, max_theta, \
i_min_phi, i_max_phi, min_r, max_r, omega_m, 1.0 - omega_m, w)
                            print command_all
                            # Blocking shell invocation; return code kept but
                            # not checked.
                            retcode = call(command_all,shell=True)
"j.e.forero.romero@gmail.com"
] | j.e.forero.romero@gmail.com |
4dde39c66097431c01d8dbfe496e121fd7f4e9b6 | a1b42a61f2f179ee0a12746d9526253ab3a407c8 | /data/boada/analysis_all/MLmethods/calc_errors_ML.py | d3e84a3acc0ac2df776c97a138a636a2b70de5ae | [
"MIT"
] | permissive | sazabi4/vpCluster | 1436c3df8d6721d67ef7dcc68c381b2bd776c45b | d0bf5e209c83b3d7781997066d61181fe60bf3af | refs/heads/master | 2021-01-16T21:27:29.776100 | 2016-04-26T20:36:21 | 2016-04-26T20:36:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,588 | py | import numpy as np
import h5py as hdf
from scipy import stats
def error(true, pred, mu):
    '''Standard error of the mean of the residuals (pred - true - mu).

    Unused, but kept to see how I did it when I wasn't using the Scipy
    functions. Calculates the error on the mean.  Returns 0 for a single
    sample and NaN for an empty input.
    '''
    # Python 2 print statement; trailing comma suppresses the newline.
    print true.size,
    if true.size > 1:
        # Sample variance of the bias-corrected residuals (ddof = 1).
        var = np.sum((pred - true - mu)**2) /(true.size - 1)
        # Standard error of the mean = sqrt(var / N).
        sem = np.sqrt(var/true.size)
        return sem
    elif true.size == 1:
        return 0
    else:
        return np.nan
def bias(true, pred):
    """Mean bias of the predictions, i.e. mean(pred - true).

    Returns NaN for an empty input.  Passed as the (currently unused)
    ``stat`` argument of runningStatistic below.
    """
    # Guard clause for the empty case, then an explicit sum / count so the
    # arithmetic matches the straightforward definition of the mean.
    if true.size == 0:
        return np.nan
    residuals = pred - true
    return np.sum(residuals) / true.size
def runningStatistic(stat, true, pred, **kwargs):
    ''' b = bias and s = uncertainty on that bias.

    Bins `true` into fixed log-mass bins [11.5, 16) of width 0.5, then for
    each bin prints the sample count and a LaTeX-formatted "bias +/- sem"
    table cell.  NOTE(review): the `stat` and `**kwargs` parameters are
    accepted but never used, and the accumulated lists are discarded
    (the function returns None) -- presumably leftovers from an earlier
    version; confirm before removing.
    '''
    bins = np.arange(11.5,16,0.5)
    # digitize returns 1-based bin indices; shift to 0-based.
    indx = np.digitize(true, bins)-1
    binNumber = len(bins)
    runningb = []
    runnings = []
    for k in xrange(binNumber):
        # Python 2 print statements throughout (trailing comma = no newline).
        print true[indx==k].size,
        # Mean bias and its standard error within bin k.
        b = np.mean(pred[indx==k] - true[indx==k])
        s = stats.sem(pred[indx==k] - true[indx==k])
        print '$%.2f\pm{%.2f}$ &' % (b,s)
        try:
            # Bayesian mean/var/std estimates; raises ValueError for bins
            # with too few samples.
            mean, var, std = stats.mvsdist(pred[indx==k] - true[indx==k])
            #print '$%.2f\pm{%.2f}$ &' % (std.mean(),std.std()),
        except ValueError:
            pass
            #print '$%.2f\pm{%.2f}$ &' % (np.nan,np.nan),
        runningb.append(b)
        runnings.append(s)
    print ''
    return
### Targeted ###
################
# Load the targeted-survey mock catalogue: true masses (M200c), power-law
# masses (MASS) and the 1d/2d/3d machine-learning mass predictions.
with hdf.File('./buzzard_targetedRealistic_masses.hdf5', 'r') as f:
    dset = f[f.keys()[0]]
    target = dset['M200c', 'MASS', 'ML_pred_1d', 'ML_pred_2d', 'ML_pred_3d']
# filter bad values
mask = (target['ML_pred_1d'] != 0)
target = target[mask]
for d in [target]:
    ### Full survey ###
    # Overall scatter of the power-law masses (log space), printed as a
    # LaTeX "value +/- error" cell (Python 2 print statement).
    mean, var, std = stats.mvsdist(np.log10(d['MASS']) - np.log10(d['M200c']))
    s = stats.sem(np.log10(d['MASS']) - np.log10(d['M200c']))
    #print '$%.2f\pm{%.3f}$' % (mean.mean(),s)
    print '$%.2f\pm{%.3f}$' % (std.mean(), std.std())
    # Per-bin bias tables for the power-law and each ML estimator.
    print('power law')
    running = runningStatistic(bias, np.log10(d['M200c']),
                               np.log10(d['MASS']))
    ############
    #### 1d ####
    ############
    print('1d')
    running = runningStatistic(bias, np.log10(d['M200c']),
                               d['ML_pred_1d'])
    #############
    #### 2d #####
    #############
    print('2d')
    running = runningStatistic(bias, np.log10(d['M200c']),
                               d['ML_pred_2d'])
    ##############
    ##### 3d #####
    ##############
    print('3d')
    running = runningStatistic(bias, np.log10(d['M200c']),
                               d['ML_pred_3d'])
    print '-----'
| [
"stevenboada@gmail.com"
] | stevenboada@gmail.com |
3ff28d2310a0f022eadd873154775534e6ed3f7d | 4936c1d20aef7a93ad2ded2f5731b102631ad8b2 | /Tablas/tablas/Ruido20/maxVotos/EF/menu_1.py | bbbc4a429fa3ea9ad1f3048aeb9cae99d255b9a6 | [
"LicenseRef-scancode-other-permissive"
] | permissive | jcarlosorte/pyMIL-BNF | 530f60081607deecfee7c72264000c0ba34984fe | 36e282e35242815bf57310db98707da70d69b183 | refs/heads/master | 2022-11-12T20:58:49.058513 | 2020-07-06T15:35:01 | 2020-07-06T15:35:01 | 182,646,512 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,214 | py | # -*- coding: utf-8 -*-
"""
Created on Fri May 10 10:11:00 2019
@author: Usuario
"""
import sys,os,warnings
os.chdir('../../../../MILpy')
sys.path.append(os.path.realpath('..'))
warnings.filterwarnings('ignore')
#from funciones import fvc
from filters import EF
from filters import CVCF
from filters import IPF
# Run the noise-filtering experiment: apply the Ensemble Filter (EF) to the
# listed MIL dataset at 20% label noise, using max-votes aggregation.
# The CVCF / IPF variants are kept below, commented out, for easy switching.
folds = 5
votacion = 'maxVotos'           # vote-aggregation scheme
DataSet = ['musk1_scaled']
#ruido = [0,5,10,15,20,25,30]
ruido = [20]                    # noise percentage(s) to evaluate
#print('********** Crear dataset con ruido **********')
#fvc.fvc_part(DataSet,folds,ruido)
print('********** Ensemble Filter por '+str(votacion)+'**********')
EF.EF(DataSet,votacion,folds,ruido)
#print('********** CV Committees Filter por '+str(votacion)+'**********')
#CVCF.CVcF(DataSet,votacion,folds,ruido)
#print('********** Iterative Partitioning Filter por '+str(votacion)+'**********')
#IPF.IPF(DataSet,votacion,folds,ruido)
#votacion = 'maxVotos'
#print('********** Ensemble Filter por '+str(votacion)+'**********')
#EF.EF(DataSet,votacion,folds,ruido)
#print('********** CV Committees Filter por '+str(votacion)+'**********')
#CVCF.CVcF(DataSet,votacion,folds,ruido)
#print('********** Iterative Partitioning Filter por '+str(votacion)+'**********')
#IPF.IPF(DataSet,votacion,folds,ruido)
"jcarlosorte@ugr.es"
] | jcarlosorte@ugr.es |
8e9f82bbc6dacde92d1f90f2d143e408042e520f | d15ed15aa3df11ce3bc5a007d65dc90ad7b7471d | /go.py | b6b362f5b72c686da4ac1f1525c638befe53b2e4 | [] | no_license | dansgithubuser/dansMap | 95947005c74f975355858f4b059b8913410814e9 | 48e035b1d6c308e83d5ddb5884475bfb88fb3eae | refs/heads/master | 2020-03-17T02:18:56.329812 | 2018-06-24T15:34:22 | 2018-06-24T15:34:22 | 133,185,552 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | import os
import sys
DIR=os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(DIR, 'deps'))
import djangogo
parser=djangogo.make_parser()
args=parser.parse_args()
djangogo.main(args,
project='dansmap',
app='map',
database='map_database',
user='map_database_user',
heroku_repo='https://git.heroku.com/safe-everglades-62273.git',
heroku_url='https://safe-everglades-62273.herokuapp.com',
)
| [
"dansonlinepresence@gmail.com"
] | dansonlinepresence@gmail.com |
2da4bc5d14efc19541749e60986d62a072c681ff | 75c96e6070fb5c2473a7ae3be30a2d3c9bd9301a | /src/colormap/__init__.py | 0266932dba1e3d059d933f1638b9e05c1b604ac1 | [
"BSD-3-Clause"
] | permissive | Akronix/colormap | c727c80c52d8f0a545b4a54974569315319113e9 | e6d9a6e8bc9e3b3ac530a48577a5bee857267304 | refs/heads/master | 2020-08-05T04:16:34.328499 | 2018-12-29T15:10:18 | 2018-12-29T15:10:18 | 212,390,848 | 1 | 0 | BSD-3-Clause | 2019-10-02T16:37:32 | 2019-10-02T16:37:31 | null | UTF-8 | Python | false | false | 1,003 | py | # -*- python -*-
# -*- coding: utf-8 -*-
#
# This file is part of the colormap software
#
# Copyright (c) 2014
#
# File author(s): Thomas Cokelaer <cokelaer@gmail.com>
#
# Distributed under the GPLv3 License.
# See accompanying file LICENSE.txt or copy at
# http://www.gnu.org/licenses/gpl-3.0.html
#
# Website: https://www.github.com/cokelaer/colormap
# Documentation: http://packages.python.org/colormap
#
##############################################################################
"""main colormap module"""
from __future__ import print_function
from __future__ import division
import pkg_resources
# Resolve the installed package version; fall back to an empty string when
# the distribution metadata is unavailable (e.g. running from a checkout).
try:
    version = pkg_resources.require("colormap")[0].version
    __version__ = version
except Exception:
    version = ''
# Re-export the public API from the submodules.
from .xfree86 import *
from . import colors
from .colors import *
from .get_cmap import *
# Module-level Colormap instance used to build the name list and aliases.
c = Colormap()
colormap_names = c.colormaps + c.diverging_black
# create aliases to the test_colormap method
test_colormap = c.test_colormap
test_cmap = c.test_colormap
| [
"cokelaer@gmail.com"
] | cokelaer@gmail.com |
d8d1fab3a4dd38f701ee8cb531edb455f731e1e9 | 368fec101daec272c8d44d592558906ee8043bc1 | /tradefed_cluster/util/ndb_util_test.py | 7d229b359db6b182129e9bc29496883828aabea4 | [
"Apache-2.0"
] | permissive | maksonlee/tradefed_cluster | 3acb0a899c073315c3e80b830784ec94a201a085 | 0568fc1d9b9dca79aed2de493955ce1adebb1d6b | refs/heads/master | 2023-08-09T17:35:46.045476 | 2023-07-21T18:54:26 | 2023-07-21T18:54:54 | 369,842,670 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,793 | py | # Copyright 202 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ndb_util."""
import unittest
from google.cloud import ndb
from tradefed_cluster import testbed_dependent_test
from tradefed_cluster.util import ndb_util
def _MockModelRenameFooToBar(obj):
obj.bar = obj.foo
def _MockModelRenameBarToZzz(obj):
obj.zzz = obj.bar
class MockModel(ndb_util.UpgradableModel):
    """Test model with a two-step field-rename history: foo -> bar -> zzz."""
    foo = ndb.StringProperty()  # schema v0 field name
    bar = ndb.StringProperty()  # schema v1 field name
    zzz = ndb.StringProperty()  # current (v2) field name

    # Applied in order to migrate an entity from its stored schema_version
    # up to the latest schema.
    _upgrade_steps = [
        _MockModelRenameFooToBar,
        _MockModelRenameBarToZzz,
    ]
class UpgradableModelTest(testbed_dependent_test.TestbedDependentTest):
    """Tests for UpgradableModel.Upgrade() and the post-get upgrade hook."""

    def testUpgrade(self):
        # A v0 entity runs both steps: foo -> bar -> zzz.
        obj = MockModel(foo='foo')
        obj.schema_version = 0
        obj.Upgrade()
        self.assertEqual(obj.zzz, 'foo')

    def testUpgrade_oneVersion(self):
        # A v1 entity only needs the second step (bar -> zzz).
        obj = MockModel(bar='foo')
        obj.schema_version = 1
        obj.Upgrade()
        self.assertEqual(obj.zzz, 'foo')

    def testUpgrade_latestVersion(self):
        # An entity already at the latest schema is left untouched.
        obj = MockModel(zzz='zzz')
        obj.put()
        obj.Upgrade()
        self.assertEqual(obj.zzz, 'zzz')

    def testPostGetHook(self):
        # Fetching a stale entity from the datastore upgrades it transparently.
        obj = MockModel(foo='foo')
        obj.schema_version = 0
        obj.put()
        obj = obj.key.get()
        self.assertEqual(obj.zzz, 'foo')
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
4358059093f7fef6e061d934bfa2b80593531bc6 | d94b6845aeeb412aac6850b70e22628bc84d1d6d | /jaxnerf/eval.py | 9ece8a83f7159aa0df74218fffb0c3781352ec66 | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | ishine/google-research | 541aea114a68ced68736340e037fc0f8257d1ea2 | c1ae273841592fce4c993bf35cdd0a6424e73da4 | refs/heads/master | 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 | Apache-2.0 | 2020-06-23T01:55:11 | 2020-02-23T07:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 5,263 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluation script for Nerf."""
import functools
from os import path
from absl import app
from absl import flags
import flax
from flax.metrics import tensorboard
from flax.training import checkpoints
import jax
from jax import random
import numpy as np
from jaxnerf.nerf import datasets
from jaxnerf.nerf import models
from jaxnerf.nerf import utils
FLAGS = flags.FLAGS
utils.define_flags()
def main(unused_argv):
    """Continuously render the test set with the latest checkpoint.

    Polls `train_dir` for new checkpoints, renders every test image with a
    pmapped deterministic renderer, logs PSNR/SSIM to TensorBoard, and
    optionally saves the rendered images and metric files to disk.  Runs a
    single pass when FLAGS.eval_once is set, otherwise loops until
    FLAGS.max_steps is reached.
    """
    # Fixed seed so evaluation renders are reproducible.
    rng = random.PRNGKey(20200823)
    if FLAGS.config is not None:
        utils.update_flags(FLAGS)
    if FLAGS.train_dir is None:
        raise ValueError("train_dir must be set. None set now.")
    if FLAGS.data_dir is None:
        raise ValueError("data_dir must be set. None set now.")

    dataset = datasets.get_dataset("test", FLAGS)
    rng, key = random.split(rng)
    model, init_variables = models.get_model(key, dataset.peek(), FLAGS)
    # The optimizer only exists to give checkpoints a place to restore into.
    optimizer = flax.optim.Adam(FLAGS.lr_init).create(init_variables)
    state = utils.TrainState(optimizer=optimizer)
    del optimizer, init_variables

    # Rendering is forced to be deterministic even if training was randomized, as
    # this eliminates "speckle" artifacts.
    def render_fn(variables, key_0, key_1, rays):
        return jax.lax.all_gather(
            model.apply(variables, key_0, key_1, rays, False), axis_name="batch")

    # pmap over only the data input.
    render_pfn = jax.pmap(
        render_fn,
        in_axes=(None, None, None, 0),
        donate_argnums=3,
        axis_name="batch",
    )

    # Compiling to the CPU because it's faster and more accurate.
    ssim_fn = jax.jit(
        functools.partial(utils.compute_ssim, max_val=1.), backend="cpu")

    last_step = 0
    out_dir = path.join(FLAGS.train_dir,
                        "path_renders" if FLAGS.render_path else "test_preds")
    if not FLAGS.eval_once:
        summary_writer = tensorboard.SummaryWriter(
            path.join(FLAGS.train_dir, "eval"))
    while True:
        # Pick up the newest checkpoint; skip if nothing new has landed.
        state = checkpoints.restore_checkpoint(FLAGS.train_dir, state)
        step = int(state.optimizer.state.step)
        if step <= last_step:
            continue
        if FLAGS.save_output and (not utils.isdir(out_dir)):
            utils.makedirs(out_dir)
        psnr_values = []
        ssim_values = []
        if not FLAGS.eval_once:
            # One random image per checkpoint is logged as a TB "showcase".
            showcase_index = np.random.randint(0, dataset.size)
        for idx in range(dataset.size):
            print(f"Evaluating {idx+1}/{dataset.size}")
            batch = next(dataset)
            pred_color, pred_disp, pred_acc = utils.render_image(
                functools.partial(render_pfn, state.optimizer.target),
                batch["rays"],
                rng,
                FLAGS.dataset == "llff",
                chunk=FLAGS.chunk)
            if jax.host_id() != 0:  # Only record via host 0.
                continue
            if not FLAGS.eval_once and idx == showcase_index:
                showcase_color = pred_color
                showcase_disp = pred_disp
                showcase_acc = pred_acc
                if not FLAGS.render_path:
                    showcase_gt = batch["pixels"]
            if not FLAGS.render_path:
                # Metrics only make sense when ground-truth pixels exist.
                psnr = utils.compute_psnr(((pred_color - batch["pixels"])**2).mean())
                ssim = ssim_fn(pred_color, batch["pixels"])
                print(f"PSNR = {psnr:.4f}, SSIM = {ssim:.4f}")
                psnr_values.append(float(psnr))
                ssim_values.append(float(ssim))
            if FLAGS.save_output:
                utils.save_img(pred_color, path.join(out_dir, "{:03d}.png".format(idx)))
                utils.save_img(pred_disp[Ellipsis, 0],
                               path.join(out_dir, "disp_{:03d}.png".format(idx)))
        if (not FLAGS.eval_once) and (jax.host_id() == 0):
            summary_writer.image("pred_color", showcase_color, step)
            summary_writer.image("pred_disp", showcase_disp, step)
            summary_writer.image("pred_acc", showcase_acc, step)
            if not FLAGS.render_path:
                summary_writer.scalar("psnr", np.mean(np.array(psnr_values)), step)
                summary_writer.scalar("ssim", np.mean(np.array(ssim_values)), step)
                summary_writer.image("target", showcase_gt, step)
        if FLAGS.save_output and (not FLAGS.render_path) and (jax.host_id() == 0):
            # Persist per-image metrics for this step plus the overall means.
            with utils.open_file(path.join(out_dir, f"psnrs_{step}.txt"), "w") as f:
                f.write(" ".join([str(v) for v in psnr_values]))
            with utils.open_file(path.join(out_dir, f"ssims_{step}.txt"), "w") as f:
                f.write(" ".join([str(v) for v in ssim_values]))
            with utils.open_file(path.join(out_dir, "psnr.txt"), "w") as f:
                f.write("{}".format(np.mean(np.array(psnr_values))))
            with utils.open_file(path.join(out_dir, "ssim.txt"), "w") as f:
                f.write("{}".format(np.mean(np.array(ssim_values))))
        if FLAGS.eval_once:
            break
        if int(step) >= FLAGS.max_steps:
            break
        last_step = step


if __name__ == "__main__":
    app.run(main)
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
fc21f78fac993994c6c1d017982080abc3e00a18 | 762c307de73db674c214619778802b863548bf2e | /env/bin/pilfont.py | ea37f1eb1170b5b23568a42c9f4bb924effcd14d | [] | no_license | mansourgueye275/django-bloggy | 56d9d6a2131a71c20d6c341764503b76ba3a45c1 | 1b8080ad26244d3d60e20e24ad6520d7a663381b | refs/heads/master | 2021-06-23T02:16:30.301697 | 2017-09-01T18:56:23 | 2017-09-01T18:56:23 | 102,141,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,075 | py | #!/home/mansour/Documents/RealPython/django-bloggy/env/bin/python3
#
# The Python Imaging Library
# $Id$
#
# PIL raster font compiler
#
# history:
# 1997-08-25 fl created
# 2002-03-10 fl use "from PIL import"
#
from __future__ import print_function
import glob
import sys
# drivers
from PIL import BdfFontFile
from PIL import PcfFontFile
VERSION = "0.4"

# With no arguments: print usage and exit non-zero.
if len(sys.argv) <= 1:
    print("PILFONT", VERSION, "-- PIL font compiler.")
    print()
    print("Usage: pilfont fontfiles...")
    print()
    print("Convert given font files to the PIL raster font format.")
    print("This version of pilfont supports X BDF and PCF fonts.")
    sys.exit(1)

# Expand shell-style wildcards in each argument into the file list.
files = []
for f in sys.argv[1:]:
    files = files + glob.glob(f)

for f in files:
    print(f + "...", end=' ')
    try:
        fp = open(f, "rb")
        # Try the PCF parser first; on a SyntaxError rewind and fall back
        # to the BDF parser.
        try:
            p = PcfFontFile.PcfFontFile(fp)
        except SyntaxError:
            fp.seek(0)
            p = BdfFontFile.BdfFontFile(fp)
        p.save(f)
    except (SyntaxError, IOError):
        # Neither parser accepted the file, or it couldn't be read/written.
        print("failed")
    else:
        print("OK")
"zoe14@live.fr"
] | zoe14@live.fr |
3591a5c5b02eaef9d5ce5f72aab5bcfda5dc4e60 | 6febc1719503d0f9dbc97f6b1202116370391b10 | /clarico/clarico_category/controllers/main.py | 1a9f165596162d8f4442ae3d08af86cabf4b192d | [] | no_license | arshakil/Odoo-Development | 5c6a1795cd64a8ebef5abfdf7d6245804594bcd8 | df37f6e8c2f7d89cdbdb36d0a8fd501ef8bfe563 | refs/heads/master | 2022-12-11T05:17:12.123339 | 2020-07-28T07:38:58 | 2020-07-28T07:38:58 | 248,154,189 | 0 | 2 | null | 2022-12-08T03:51:50 | 2020-03-18T06:20:59 | Python | UTF-8 | Python | false | false | 587 | py | from odoo import http
from odoo.http import request
from odoo import SUPERUSER_ID
from odoo import models, fields, api
class claricoCategory(http.Controller):
    """Website JSON controller serving rendered category-showcase snippets."""

    @http.route(['/showcase_data'],type='json', auth='public', website=True , csrf=False, cache=30)
    def category_data(self,template,limit=10):
        """Render `template` with up to `limit` top-level product categories.

        Only root categories (no parent_id) are included; the rendered
        QWeb markup is returned to the caller.
        """
        data=request.env['product.public.category'].search([['parent_id','=',False]],limit=limit)
        values = {'object':data}
        return request.env.ref(template).render(values)
| [
"azizur.rahman363410@gmail.com"
] | azizur.rahman363410@gmail.com |
f2ab634a4d0e2b54bbdbd6cb5b20849ba87ef995 | 0db86f23fd8f6ff3b6119db0b7fab0f8522611f6 | /Intro_To_Python/HW17/census.py | 4eecf12f9f57d54ea047c9f2da4485ce67a78d9b | [] | no_license | justinminsk/Python_Files | 8a6d96ecc6596e19413b35758d3234900d2381b6 | 68b3815fae58bc2d7ec86bcd42d46354d8b3d2f0 | refs/heads/master | 2021-05-15T11:24:25.473043 | 2018-02-08T20:23:12 | 2018-02-08T20:23:12 | 108,322,115 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,620 | py | from sqlite3 import *
def make_database():
    """Open (creating on first use) the census SQLite database.

    Returns:
        An open sqlite3 connection to ``census.db`` in the working directory.
    """
    # connect() creates the file if it does not exist yet.
    return connect('census.db')
def make_db_table():
    """Create the ``Density`` table in census.db if it is missing.

    Returns:
        A cursor on the census.db connection, ready for further queries.
    """
    dataframe = connect('census.db')
    df = dataframe.cursor()
    # IF NOT EXISTS makes the call idempotent; the previous version raised
    # sqlite3.OperationalError whenever the table had already been created
    # (e.g. by add_entries() or a prior call).
    df.execute('CREATE TABLE IF NOT EXISTS Density(Province TEXT, Population INTEGER, Area REAL)')
    dataframe.commit()
    return df
def add_entries():
    """Create (if needed) and populate the ``Density`` table.

    Inserts one row per Canadian province/territory with 2001-census
    population and land area figures.

    Fixes over the original:
    * ``CREATE TABLE`` is now ``CREATE TABLE IF NOT EXISTS`` so the function
      no longer raises ``sqlite3.OperationalError`` when the table exists.
    * the per-row insert loop is replaced with a single ``executemany`` and
      one commit.

    NOTE(review): calling this twice still inserts duplicate rows, matching
    the original data model (no primary key on Province).
    """
    dataframe = connect('census.db')
    df = dataframe.cursor()
    df.execute('CREATE TABLE IF NOT EXISTS Density(Province TEXT, Population INTEGER, Area REAL)')
    table = [
        ('Newfoundland and Labrador', 512930, 370501.69),
        ('Prince Edward Island', 135294, 5684.39),
        ('Nova Scotia', 908007, 52917.43),
        ('New Brunswick', 729498, 71355.67),
        ('Quebec', 7237479, 1357743.08),
        ('Ontario', 11410046, 907655.59),
        ('Manitoba', 1119583, 551937.87),
        ('Saskatchewan', 978933, 586561.35),
        ('Alberta', 2974807, 639987.12),
        ('British Columbia', 3907738, 926492.48),
        ('Yukon Territory', 28674, 474706.97),
        ('Northwest Territories', 37360, 1141108.37),
        ('Nunavut', 26745, 1925460.18),
    ]
    df.executemany('INSERT INTO Density VALUES (?, ?, ?)', table)
    dataframe.commit()
def get_content():
    """Create/populate the ``Density`` table, then print every row.

    Fixes over the original:
    * ``CREATE TABLE IF NOT EXISTS`` prevents the ``OperationalError`` the
      unconditional ``CREATE TABLE`` raised when the table already existed.
    * the insert loop is collapsed into a single ``executemany``.

    Behavior is otherwise unchanged: the full table contents are printed,
    one tuple per line.
    """
    dataframe = connect('census.db')
    df = dataframe.cursor()
    df.execute('CREATE TABLE IF NOT EXISTS Density(Province TEXT, Population INTEGER, Area REAL)')
    table = [
        ('Newfoundland and Labrador', 512930, 370501.69),
        ('Prince Edward Island', 135294, 5684.39),
        ('Nova Scotia', 908007, 52917.43),
        ('New Brunswick', 729498, 71355.67),
        ('Quebec', 7237479, 1357743.08),
        ('Ontario', 11410046, 907655.59),
        ('Manitoba', 1119583, 551937.87),
        ('Saskatchewan', 978933, 586561.35),
        ('Alberta', 2974807, 639987.12),
        ('British Columbia', 3907738, 926492.48),
        ('Yukon Territory', 28674, 474706.97),
        ('Northwest Territories', 37360, 1141108.37),
        ('Nunavut', 26745, 1925460.18),
    ]
    df.executemany('INSERT INTO Density VALUES (?, ?, ?)', table)
    dataframe.commit()
    df.execute('SELECT * FROM Density')
    for line in df.fetchall():
        print(line)
def get_pop():
    """Print the population column of every row in the Density table."""
    cursor = connect('census.db').cursor()
    for row in cursor.execute('SELECT Population FROM Density'):
        print(row)
def get_prov_lt10mill():
    """Print each province whose population is below one million.

    NOTE(review): the name suggests a ten-million threshold, but the query
    has always used 1,000,000; kept as-is to preserve behavior.
    """
    cursor = connect('census.db').cursor()
    for row in cursor.execute('SELECT Province FROM Density WHERE Population < 1000000'):
        print(row)
def get_prov_lt10mill_gt5mill():
    """Print each province whose population is below 1M or above 5M."""
    cursor = connect('census.db').cursor()
    for row in cursor.execute(
            'SELECT Province FROM Density WHERE (Population < 1000000 or Population > 5000000)'):
        print(row)
def get_prov_nlt10mill_ngt5mill():
    """Print each province whose population is between 1M and 5M inclusive."""
    cursor = connect('census.db').cursor()
    for row in cursor.execute(
            'SELECT Province FROM Density WHERE NOT(Population < 1000000 or Population > 5000000)'):
        print(row)
def get_prov_landgt200th():
    """Print each province with a land area above 200,000 square units."""
    cursor = connect('census.db').cursor()
    for row in cursor.execute('SELECT Province FROM Density WHERE Area > 200000'):
        print(row)
def get_popden():
    """Print (province, population density) for every row.

    Density is computed in SQL as Population / Area.
    """
    cursor = connect('census.db').cursor()
    for row in cursor.execute('SELECT Province, Population / Area FROM Density'):
        print(row)
if __name__ == '__main__':
    # Ad-hoc entry point: print the population density of every province.
    # Assumes census.db has already been created/populated (e.g. via add_entries).
    get_popden()
| [
"justin.minsk@gmail.com"
] | justin.minsk@gmail.com |
72a5ef263bf35a3f4944b9f1c6311a9be39457da | e1757740bef23814319c7edcb4d77f81fcc0d8f5 | /lookerpy/apis/dashboard_api.py | 3023c7d31b6b704db47c72e9c78038446e47ea67 | [] | no_license | bufferapp/lookerpy | 65a43c89c05d49caa00e52d223fa61f941054b2d | e81634f15bff006a0643320a41175861d9990e4c | refs/heads/master | 2021-01-01T05:16:14.891450 | 2017-10-05T12:48:44 | 2017-10-05T12:48:44 | 58,413,445 | 21 | 8 | null | 2018-08-02T21:07:52 | 2016-05-09T22:58:07 | Python | UTF-8 | Python | false | false | 31,616 | py | # coding: utf-8
"""
DashboardApi.py
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class DashboardApi(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen

    Refactor: the ten generated endpoint methods each duplicated ~50 lines of
    identical request-building boilerplate.  That logic now lives in the
    private :meth:`_call_endpoint` helper; every public method keeps its
    original signature and error behavior:

    * an unexpected keyword argument raises ``TypeError``
    * a missing or ``None`` required parameter raises ``ValueError``
    * an optional ``callback`` kwarg makes the request asynchronous, in
      which case the request thread is returned instead of the response
    """

    def __init__(self, api_client=None):
        # Fall back to the shared ApiClient held by the global Configuration,
        # creating it lazily on first use.
        config = Configuration()
        if api_client:
            self.api_client = api_client
        else:
            if not config.api_client:
                config.api_client = ApiClient()
            self.api_client = config.api_client

    def _call_endpoint(self, method_name, verb, resource_path, response_type,
                       all_params, values, required=(), path_names=(),
                       query_names=()):
        """Validate *values* and dispatch a single HTTP request.

        :param str method_name: public method name, used in error messages
        :param str verb: HTTP verb ('GET', 'POST', 'PATCH', 'DELETE')
        :param str resource_path: endpoint path template
        :param str response_type: swagger type the response deserializes to
        :param list all_params: parameter names the endpoint accepts
        :param dict values: merged positional + keyword arguments
        :param required: names that must be present and non-None
        :param path_names: names substituted into the path template
        :param query_names: names sent as query parameters
        :return: deserialized response, or the request thread when a
            ``callback`` was supplied
        """
        allowed = set(all_params)
        allowed.add('callback')
        for key in values:
            if key not in allowed:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method %s" % (key, method_name)
                )
        for name in required:
            if values.get(name) is None:
                raise ValueError(
                    "Missing the required parameter `%s` when calling `%s`"
                    % (name, method_name))

        path_params = dict((n, values[n]) for n in path_names if n in values)
        query_params = dict((n, values[n]) for n in query_names if n in values)

        header_params = {}
        # HTTP header `Accept` (dropped when the client produces an empty value)
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])

        # No endpoint in this API uses auth settings, form params or files.
        return self.api_client.call_api(
            resource_path.replace('{format}', 'json'), verb,
            path_params,
            query_params,
            header_params,
            body=values.get('body'),
            post_params=[],
            files={},
            response_type=response_type,
            auth_settings=[],
            callback=values.get('callback'))

    def all_dashboards(self, **kwargs):
        """
        get all dashboards

        Get information about all dashboards.

        :param callback function: callback for asynchronous request (optional)
        :param str fields: Requested fields.
        :return: list[DashboardBase]
        """
        return self._call_endpoint(
            'all_dashboards', 'GET', '/dashboards', 'list[DashboardBase]',
            all_params=['fields'], values=kwargs,
            query_names=['fields'])

    def copy_dashboards(self, **kwargs):
        """
        copy dashboards to space

        ### Copy dashboards with specified ids to space

        :param callback function: callback for asynchronous request (optional)
        :param Dashboard body: dashboard
        :param str space_id: Destination space id.
        :param list[str] dashboard_ids: Dashboard ids to copy.
        :return: str
        """
        return self._call_endpoint(
            'copy_dashboards', 'POST', '/dashboards/copy', 'str',
            all_params=['body', 'space_id', 'dashboard_ids'], values=kwargs,
            query_names=['space_id', 'dashboard_ids'])

    def create_dashboard(self, **kwargs):
        """
        create dashboard

        ### Create a dashboard with specified information.

        :param callback function: callback for asynchronous request (optional)
        :param Dashboard body: dashboard
        :return: Dashboard
        """
        return self._call_endpoint(
            'create_dashboard', 'POST', '/dashboards', 'Dashboard',
            all_params=['body'], values=kwargs)

    def create_dashboard_prefetch(self, dashboard_id, **kwargs):
        """
        create a prefetch

        ### Create a prefetch for a dashboard with the specified information.

        :param callback function: callback for asynchronous request (optional)
        :param str dashboard_id: Id of dashboard (required)
        :param PrefetchDashboardRequestMapper body: Parameters for prefetch request
        :return: PrefetchDashboardRequestMapper
        """
        return self._call_endpoint(
            'create_dashboard_prefetch', 'POST',
            '/dashboards/{dashboard_id}/prefetch',
            'PrefetchDashboardRequestMapper',
            all_params=['dashboard_id', 'body'],
            values=dict(kwargs, dashboard_id=dashboard_id),
            required=['dashboard_id'], path_names=['dashboard_id'])

    def dashboard(self, dashboard_id, **kwargs):
        """
        get dashboard

        ### Get information about the dashboard with a specific id.

        :param callback function: callback for asynchronous request (optional)
        :param str dashboard_id: Id of dashboard (required)
        :param str fields: Requested fields.
        :return: Dashboard
        """
        return self._call_endpoint(
            'dashboard', 'GET', '/dashboards/{dashboard_id}', 'Dashboard',
            all_params=['dashboard_id', 'fields'],
            values=dict(kwargs, dashboard_id=dashboard_id),
            required=['dashboard_id'], path_names=['dashboard_id'],
            query_names=['fields'])

    def dashboard_prefetch(self, dashboard_id, **kwargs):
        """
        get a prefetch

        ### Get a prefetch for a dashboard with the specified information.

        :param callback function: callback for asynchronous request (optional)
        :param str dashboard_id: Id of dashboard (required)
        :param list[PrefetchDashboardFilterValue] dashboard_filters: JSON encoded
            string of Dashboard filters that were applied to prefetch
        :return: PrefetchMapper
        """
        return self._call_endpoint(
            'dashboard_prefetch', 'GET',
            '/dashboards/{dashboard_id}/prefetch', 'PrefetchMapper',
            all_params=['dashboard_id', 'dashboard_filters'],
            values=dict(kwargs, dashboard_id=dashboard_id),
            required=['dashboard_id'], path_names=['dashboard_id'],
            query_names=['dashboard_filters'])

    def dashboards_move_plan(self, **kwargs):
        """
        plan for moving dashboards to space

        ### Plan for moving dashboards with specified ids.

        :param callback function: callback for asynchronous request (optional)
        :param str space_id: Destination space id.
        :param list[str] dashboard_ids: Dashboard ids to move.
        :return: LookMovePlan
        """
        return self._call_endpoint(
            'dashboards_move_plan', 'GET', '/dashboards/move_plan',
            'LookMovePlan',
            all_params=['space_id', 'dashboard_ids'], values=kwargs,
            query_names=['space_id', 'dashboard_ids'])

    def delete_dashboard(self, dashboard_id, **kwargs):
        """
        delete dashboard

        ### Delete the dashboard with a specific id.

        :param callback function: callback for asynchronous request (optional)
        :param str dashboard_id: Id of dashboard (required)
        :return: str
        """
        return self._call_endpoint(
            'delete_dashboard', 'DELETE', '/dashboards/{dashboard_id}', 'str',
            all_params=['dashboard_id'],
            values=dict(kwargs, dashboard_id=dashboard_id),
            required=['dashboard_id'], path_names=['dashboard_id'])

    def move_dashboards(self, body, **kwargs):
        """
        move dashboards to space

        ### Move dashboards with specified ids to space

        :param callback function: callback for asynchronous request (optional)
        :param Dashboard body: dashboard (required)
        :param str space_id: Destination space id.
        :param list[str] dashboard_ids: Dashboard ids to move.
        :return: str
        """
        return self._call_endpoint(
            'move_dashboards', 'PATCH', '/dashboards/move', 'str',
            all_params=['body', 'space_id', 'dashboard_ids'],
            values=dict(kwargs, body=body),
            required=['body'],
            query_names=['space_id', 'dashboard_ids'])

    def update_dashboard(self, dashboard_id, body, **kwargs):
        """
        update dashboard

        ### Update the dashboard with a specific id.

        :param callback function: callback for asynchronous request (optional)
        :param str dashboard_id: Id of dashboard (required)
        :param Dashboard body: dashboard (required)
        :return: Dashboard
        """
        return self._call_endpoint(
            'update_dashboard', 'PATCH', '/dashboards/{dashboard_id}',
            'Dashboard',
            all_params=['dashboard_id', 'body'],
            values=dict(kwargs, dashboard_id=dashboard_id, body=body),
            required=['dashboard_id', 'body'], path_names=['dashboard_id'])
| [
"michaelerasmus@gmail.com"
] | michaelerasmus@gmail.com |
21c9c76e357ed82d65fb33410d6e55d014fba9f3 | 18df7bd3c6a4e35f93b0163b09f0bd304fd82fb9 | /conda/cli/main_run.py | 2e25b2a4149c2a1bf1f325e83c47d951004d188c | [
"BSD-3-Clause",
"MIT"
] | permissive | mitchellkrogza/conda | f1d092d913ac121e3df705decfb3b2e584bf829b | 958f4056578282ef380cdbfc09d3dd736cc5643a | refs/heads/master | 2020-03-25T06:46:56.095771 | 2018-08-02T05:28:42 | 2018-08-02T05:28:42 | 143,523,511 | 1 | 0 | null | 2018-08-04T11:34:43 | 2018-08-04T11:34:43 | null | UTF-8 | Python | false | false | 2,723 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import os
from os.path import abspath, join
import sys
from tempfile import NamedTemporaryFile
from ..base.context import context
from ..common.compat import ensure_binary, iteritems, on_win
from ..gateways.disk.delete import rm_rf
from ..gateways.subprocess import subprocess_call
def get_activated_env_vars():
    """Return the environment-variable map produced by activating the current
    target prefix, with all keys and values coerced to ``str``.

    Delegates to the platform-specific helper (cmd.exe batch on Windows,
    POSIX shell otherwise).
    """
    prefix = context.target_prefix
    getter = _get_activated_env_vars_win if on_win else _get_activated_env_vars_unix
    raw_map = getter(prefix)
    return dict((str(key), str(value)) for key, value in iteritems(raw_map))
def _get_activated_env_vars_win(env_location):
    """Activate *env_location* via conda.bat in a cmd.exe subshell and return
    the resulting environment as a dict (parsed from JSON printed by the
    environment's Python).
    """
    try:
        conda_bat = os.environ["CONDA_BAT"]
    except KeyError:
        # NOTE(review): assumes conda.bat lives under sys.prefix\condacmd when
        # CONDA_BAT is not set — confirm against the installer layout.
        conda_bat = abspath(join(sys.prefix, 'condacmd', 'conda.bat'))
    temp_path = None
    try:
        # Write a throwaway .bat that dumps the (activated) environment as JSON.
        # delete=False because cmd.exe must open it after this handle closes.
        with NamedTemporaryFile('w+b', suffix='.bat', delete=False) as tf:
            temp_path = tf.name
            tf.write(ensure_binary(
                "@%CONDA_PYTHON_EXE% -c \"import os, json; print(json.dumps(dict(os.environ)))\""
            ))
        # TODO: refactor into single function along with code in conda.core.link.run_script
        cmd_builder = [
            "%s" % os.getenv('COMSPEC', 'cmd.exe'),
            "/C \"",
            "@SET PROMPT= ",          # suppress the prompt so it can't pollute output
            "&&",
            "@SET CONDA_CHANGEPS1=false",
            "&&",
            "@CALL {0} activate \"{1}\"".format(conda_bat, env_location),
            "&&",
            "\"{0}\"".format(tf.name),
            "\"",
        ]
        cmd = " ".join(cmd_builder)
        result = subprocess_call(cmd)
    finally:
        # Always clean up the temporary batch file.
        if temp_path:
            rm_rf(temp_path)
    # Activation is expected to be silent on stderr; anything there is an error.
    assert not result.stderr, result.stderr
    env_var_map = json.loads(result.stdout)
    return env_var_map
def _get_activated_env_vars_unix(env_location):
    """Activate *env_location* in a POSIX ``sh`` subshell and return the
    resulting environment as a dict (parsed from JSON printed by the
    environment's Python).
    """
    try:
        conda_exe = os.environ["CONDA_EXE"]
    except KeyError:
        # NOTE(review): assumes the conda entry point is at sys.prefix/bin/conda
        # when CONDA_EXE is not set — confirm against the installer layout.
        conda_exe = abspath(join(sys.prefix, 'bin', 'conda'))
    cmd_builder = [
        # NOTE: the first two entries have NO comma between them — Python's
        # implicit string concatenation deliberately fuses "sh -c '" with the
        # eval expression so the opening quote and the hook share one element.
        "sh -c \'"
        "eval \"$(\"{0}\" shell.posix hook)\"".format(conda_exe),
        "&&",
        "conda activate \"{0}\"".format(env_location),
        "&&",
        "\"$CONDA_PYTHON_EXE\" -c \"import os, json; print(json.dumps(dict(os.environ)))\"",
        "\'",
    ]
    cmd = " ".join(cmd_builder)
    result = subprocess_call(cmd)
    # Activation is expected to be silent on stderr; anything there is an error.
    assert not result.stderr, result.stderr
    env_var_map = json.loads(result.stdout)
    return env_var_map
def execute(args, parser):
    """CLI entry point for ``conda run``: exec the requested command with the
    environment variables of the activated target prefix."""
    from .conda_argparse import _exec
    _exec(args.executable_call, get_activated_env_vars())
| [
"kfranz@continuum.io"
] | kfranz@continuum.io |
a007a093bbf5492cf4d74affab5f7938abe6ad6a | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_116/1171.py | fbe4a49e751c83434826e0418b4e5ba6c252d680 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,837 | py | #!/usr/bin/env python
# Google Code Jam "Tic-Tac-Toe-Tomek" solver (Python 2).
# Reads T test cases; each case is a 4x4 board of X/O/T/. followed by a blank
# line.  'T' is a wildcard that counts for either player.  For each case it
# prints the game state: "X won", "O won", "Draw", or "Game has not completed".
T = long(raw_input()) #number of real tests
for t in range(1,T+1):
    # Read the 4x4 board as a list of 4 lists of single characters.
    A = []
    for i in range(4):
        x = raw_input()
        A.append([q for q in x])
    x = raw_input() #get rid of trailing blank line
    out = ""
    dotcnt = 0
    #we have the board now look for winners
    # Check each row: four of a player's symbol plus any 'T' wildcards wins.
    for r in range(4):
        if A[r].count('X') + A[r].count('T') == 4:
            out = "X won"
        elif A[r].count('O') + A[r].count('T') == 4:
            out = "O won"
        dotcnt = dotcnt + A[r].count('.')
    if out:
        print "Case #%i: %s"%(t,out)
        continue
    C=[] #check one diagonal
    for r in range(4):
        C.append(A[r][r]) #build the diagonal
    if C.count('X') + C.count('T') == 4:
        out = "X won"
    elif C.count('O') + C.count('T') == 4:
        out = "O won"
    if out:
        print "Case #%i: %s"%(t,out)
        continue
    C=[] #check other diagonal
    for r in range(4):
        c=3-r
        C.append(A[r][c]) #build the diagonal
    if C.count('X') + C.count('T') == 4:
        out = "X won"
    elif C.count('O') + C.count('T') == 4:
        out = "O won"
    if out:
        print "Case #%i: %s"%(t,out)
        continue
    # Transpose the board into B so columns can be checked like rows.
    B = []
    x = []
    for c in range(4):
        for r in range(4):
            x.append(A[r][c])
        B.append(x)
        x=[]
    for r in range(4):
        if B[r].count('X') + B[r].count('T') == 4:
            out = "X won"
        elif B[r].count('O') + B[r].count('T') == 4:
            out = "O won"
    if out:
        print "Case #%i: %s"%(t,out)
        continue
    # No winner: a full board (no '.') is a draw, otherwise play continues.
    if dotcnt == 0:
        print "Case #%i: %s"%(t,"Draw")
    else:
        print "Case #%i: %s"%(t,"Game has not completed")
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
c1f31c8430d43e31a6eef4e68c76c7800e5e01cd | 8e246e74d91565f8040f7dffcfc43e8e72c56285 | /pixiedust/utils/dataFrameAdapter.py | f1393bd6b45c532667676e739348a2ab5e8f891f | [
"Apache-2.0"
] | permissive | EldritchJS/pixiedust | 706728e034be6281320a1d927d2acb74c3c20dc5 | 5eea4a7f061fa6278e7d21db2df18accf48c1d5e | refs/heads/master | 2021-01-21T20:29:27.262716 | 2017-05-24T02:30:29 | 2017-05-24T02:30:29 | 92,239,313 | 0 | 0 | null | 2017-05-24T02:05:31 | 2017-05-24T02:05:31 | null | UTF-8 | Python | false | false | 5,093 | py | # -------------------------------------------------------------------------------
# Copyright IBM Corp. 2017
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------------
import numpy as np
import pandas as pd
import re
from pyspark.sql.types import *
import pixiedust.utils.dataFrameMisc as dataFrameMisc
def createDataframeAdapter(entity):
    """Return a Spark-DataFrame-like view of *entity*.

    pandas DataFrames are wrapped in a :class:`PandasDataFrameAdapter`;
    PySpark DataFrames already expose the expected API and are returned
    unchanged.

    Raises:
        ValueError: if *entity* is neither a pandas nor a PySpark DataFrame.
            (Fix: the original message was just "Invalid argument", which
            gave the caller no hint about what was expected.)
    """
    if dataFrameMisc.isPandasDataFrame(entity):
        return PandasDataFrameAdapter(entity)
    elif dataFrameMisc.isPySparkDataFrame(entity):
        return entity
    raise ValueError(
        "Invalid argument: expected a pandas or PySpark DataFrame, got {0}".format(
            type(entity).__name__))
"""
Adapter interface to Spark APIs. Passed to pixiedust visualizations that expect a Spark DataFrame so they can work
with pandas dataframe with no code change.
This is Experimental, currently support only a subset of the Spark DataFrame APIs.
"""
class PandasDataFrameAdapter(object):
    """Expose a subset of the Spark DataFrame API over a pandas DataFrame.

    When the wrapped entity is already a PySpark DataFrame, attribute access
    is delegated to it untouched; otherwise Spark-style calls are emulated
    on top of pandas.
    """
    def __init__(self, entity):
        # entity may be either a pandas or a PySpark DataFrame.
        self.entity = entity
        self.sparkDF = dataFrameMisc.isPySparkDataFrame(entity);
    def __getattr__(self, name):
        """Lazily emulate the Spark DataFrame attributes used by callers."""
        # Real Spark DataFrame: pass the attribute straight through.
        if self.sparkDF and hasattr(self.entity, name):
            return self.entity.__getattribute__(name)
        if name=="schema":
            # Fabricate a minimal object exposing a Spark-like `fields` list.
            return type("AdapterSchema",(),{"fields": self.getFields()})()
        elif name=="groupBy":
            return lambda cols: AdapterGroupBy(self.entity.groupby(cols))
        elif name=="dropna":
            return lambda: PandasDataFrameAdapter(pd.DataFrame(self.entity.dropna()))
        elif name=="sort":
            # NOTE(review): sort is a no-op — the argument is ignored and the
            # same adapter is returned; confirm callers do not rely on sorting.
            return lambda arg: self
        elif name=="select":
            return lambda name: PandasDataFrameAdapter(self.entity[name].reset_index())
        elif name=="orderBy":
            # NOTE(review): the `col` argument is ignored; this always sorts
            # descending on a hard-coded "agg" column — verify against callers.
            return lambda col: PandasDataFrameAdapter(self.entity.sort("agg",ascending=False))
        raise AttributeError("{0} attribute not found".format(name))
    def count(self):
        """Return the number of rows in the dataframe."""
        if self.sparkDF:
            return self.entity.count()
        else:
            return len(self.entity.index)
    def take(self,num):
        """Return up to *num* rows; pandas rows are converted to dicts."""
        if self.sparkDF:
            return self.entity.take(num)
        else:
            df = self.entity.head(num)
            colNames = self.entity.columns.values.tolist()
            def makeJsonRow(row):
                # Map positional row values back to their column names.
                ret = {}
                for i,v in enumerate(colNames):
                    ret[v]=row[i]
                return ret
            return [makeJsonRow(self.entity.iloc[i].values.tolist()) for i in range(0,len(df.index))]
    def getFields(self):
        """Return Spark-style schema field descriptors for the dataframe."""
        if self.sparkDF:
            return self.entity.schema.fields
        else:
            #must be a pandas dataframe
            def createObj(a,b):
                # Lightweight stand-in for a Spark StructField: numeric dtypes
                # map to IntegerType, everything else to StringType.
                return type("",(),{
                    "jsonValue":lambda self: {"type": b, "name": a}, "name":a,
                    "dataType": IntegerType() if np.issubdtype(b, np.integer) or np.issubdtype(b, np.float) else StringType()
                })()
            return [createObj(a,b) for a,b in zip(self.entity.columns, self.entity.dtypes)]
    def getTypeName(self):
        """Return a display name for the row type."""
        if self.sparkDF:
            return self.entity.schema.typeName()
        else:
            return "Pandas DataFrame Row"
    def toPandas(self):
        """Return the underlying data as a pandas DataFrame."""
        if self.sparkDF:
            return self.entity.toPandas()
        else:
            return self.entity
class AdapterGroupBy(object):
    """Mimic a subset of Spark's GroupedData API on top of a pandas groupby."""

    # SQL aggregation keyword -> name of the pandas SeriesGroupBy method.
    _PANDAS_AGG = {
        "SUM": "sum",
        "AVG": "mean",
        "MIN": "min",
        "MAX": "max",
        "COUNT": "count",
    }

    def __init__(self, group):
        self.group = group

    def count(self):
        """Return the per-group row count, exposed as column "count"."""
        counts = self.group.size().reset_index(name="count")
        return PandasDataFrameAdapter(counts)

    def agg(self, exp):
        """Evaluate a SQL-style aggregation expression, e.g. "SUM(col) as total".

        Raises AttributeError when the expression cannot be parsed or names
        an unsupported aggregation function.
        """
        match = re.search("(\w+?)\((.+?)\)(?:.+?(?:as\s+(\w*))|$)", str(exp), re.IGNORECASE)
        if match is None:
            raise AttributeError("call to agg with not supported expression: {0}".format(str(exp)))
        func_name = match.group(1).upper()
        grouped_col = match.group(2)
        alias = match.group(3) or "agg"
        method_name = self._PANDAS_AGG.get(func_name)
        if method_name is None:
            raise AttributeError("Unsupported aggregation function {0}".format(func_name))
        aggregated = getattr(self.group[grouped_col], method_name)()
        return PandasDataFrameAdapter(aggregated.reset_index(name=alias))
"david_taieb@us.ibm.com"
] | david_taieb@us.ibm.com |
34f18b43569133bf9e8e85b99fd90377e982c9b2 | fa346a2d5886420e22707a7be03599e634b230a9 | /temboo/Library/Facebook/Publishing/PublishNote.py | 5cb13e2c67897b548f536ca97016a58a44a8c195 | [] | no_license | elihuvillaraus/entity-resolution | cebf937499ed270c3436b1dd25ab4aef687adc11 | 71dd49118a6e11b236861289dcf36436d31f06bc | refs/heads/master | 2021-12-02T17:29:11.864065 | 2014-01-08T04:29:30 | 2014-01-08T04:29:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,634 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# PublishNote
# Publishes a note on a given profile.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class PublishNote(Choreography):
    """Choreo that publishes a note on a given Facebook profile."""
    def __init__(self, temboo_session):
        """
        Create a new instance of the PublishNote Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        Choreography.__init__(self, temboo_session, '/Library/Facebook/Publishing/PublishNote')
    def new_input_set(self):
        # Factory for the input container used to parameterize an execution.
        return PublishNoteInputSet()
    def _make_result_set(self, result, path):
        # Wrap a raw execution result in the typed result set.
        return PublishNoteResultSet(result, path)
    def _make_execution(self, session, exec_id, path):
        # Factory for the execution handle tracking an in-flight Choreo run.
        return PublishNoteChoreographyExecution(session, exec_id, path)
class PublishNoteInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the PublishNote
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    def _set(self, name, value):
        # Forward a single named input value to the underlying InputSet.
        InputSet._set_input(self, name, value)
    def set_AccessToken(self, value):
        """((required, string) The access token retrieved from the final step of the OAuth process.)"""
        self._set('AccessToken', value)
    def set_Message(self, value):
        """((required, string) The contents of the note.)"""
        self._set('Message', value)
    def set_ProfileID(self, value):
        """((optional, string) The id of the profile that the note will be published to. Defaults to "me" indicating the authenticated user.)"""
        self._set('ProfileID', value)
    def set_ResponseFormat(self, value):
        """((optional, string) The format that the response should be in. Can be set to xml or json. Defaults to json.)"""
        self._set('ResponseFormat', value)
    def set_Subject(self, value):
        """((required, string) A subject line for the note being created.)"""
        self._set('Subject', value)
class PublishNoteResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the PublishNote Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    def getJSONFromString(self, str):
        # Deserialize a JSON string into Python objects.
        return json.loads(str)
    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. (The response from Facebook. Corresponds to the ResponseFormat input. Defaults to JSON.)
        """
        return self._output.get('Response', None)
    def getFacebookObjectId(self):
        """
        Get the ID of the object that has been created
        """
        raw_response = self._output.get('Response', [])
        parsed = self.getJSONFromString(raw_response)
        return parsed.get("id", [])
class PublishNoteChoreographyExecution(ChoreographyExecution):
    """Execution handle for a PublishNote Choreo run; produces typed result sets."""
    def _make_result_set(self, response, path):
        # Wrap the raw response in the PublishNote-specific result set.
        return PublishNoteResultSet(response, path)
| [
"cedric.warny@gmail.com"
] | cedric.warny@gmail.com |
b4724b82e389ae6ab9ab29dfe11c7ad6f6d3f090 | ea4e262f3dc18a089895fef08bedefc60b66e373 | /unsupervised_learning/0x04-autoencoders/3-variational.py | e87abd498de0857be678fa67f68543ab58e9abff | [] | no_license | d1sd41n/holbertonschool-machine_learning | 777899d4914e315883ba0c887d891c0c8ab01c8a | 5f86dee95f4d1c32014d0d74a368f342ff3ce6f7 | refs/heads/main | 2023-07-17T09:22:36.257702 | 2021-08-27T03:44:24 | 2021-08-27T03:44:24 | 317,399,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,502 | py | #!/usr/bin/env python3
"""[summary]
Returns:
[type]: [description]
"""
import tensorflow.keras as keras
def autoencoder(input_dims, hidden_layers, latent_dims):
    """Build and compile a variational autoencoder (VAE).

    Args:
        input_dims (int): dimensionality of the model input.
        hidden_layers (list[int]): node counts for each hidden layer of the
            encoder; the decoder mirrors them in reverse order.
        latent_dims (int): dimensionality of the latent space representation.

    Returns:
        tuple: ``(encoder, decoder, auto)`` where ``encoder`` outputs
        ``[z, z_mean, z_log_sigma]``, ``decoder`` maps latent vectors back
        to reconstructions, and ``auto`` is the full model compiled with
        Adam and a KL-regularized binary cross-entropy loss.
    """
    backend = keras.backend

    def s_a(args):
        # Reparameterization trick: z = mean + exp(log_sigma) * epsilon with
        # epsilon drawn from a normal of stddev 0.1, keeping the sampling
        # step differentiable w.r.t. the encoder outputs.
        z_mean, z_log_sigma = args
        batch = backend.shape(
            z_mean
        )[0]
        epsilon = backend.random_normal(
            shape=(batch, latent_dims),
            mean=0.0,
            stddev=0.1)
        return z_mean + backend.exp(
            z_log_sigma) * epsilon

    # Encoder: dense ReLU stack followed by the two latent heads.
    encoder_In = keras.Input(
        shape=(
            input_dims,))
    encoder = encoder_In
    for nodes in hidden_layers:
        encoder = keras.layers.Dense(
            nodes,
            activation='relu'
        )(encoder)
    z_mean = keras.layers.Dense(
        latent_dims)(
        encoder)
    z_log_sigma = keras.layers.Dense(
        latent_dims
    )(encoder)
    z = keras.layers.Lambda(
        s_a)([z_mean, z_log_sigma]
    )

    # Decoder: mirrored dense stack ending in a sigmoid reconstruction layer.
    decoder_In = keras.Input(
        shape=(latent_dims,
               ))
    decoder = decoder_In
    for nodes in hidden_layers[::-1]:
        decoder = keras.layers.Dense(
            nodes,
            activation='relu'
        )(
            decoder)
    decoder = keras.layers.Dense(
        input_dims,
        activation='sigmoid'
    )(
        decoder)
    encoder = keras.Model(encoder_In,
                          [z, z_mean, z_log_sigma]
                          )
    decoder = keras.Model(
        decoder_In,
        decoder)
    out = decoder(
        encoder(
            encoder_In))
    auto = keras.Model(
        encoder_In,
        out)

    def cost_f(val1, val2):
        # VAE loss = scaled reconstruction loss + KL divergence of the latent
        # distribution from the unit Gaussian prior. The (y_true, y_pred)
        # arguments are ignored; the loss closes over the graph tensors.
        reconstruction_loss = keras.losses.binary_crossentropy(
            encoder_In,
            out
        )
        reconstruction_loss *= input_dims
        kl_loss = 1 + z_log_sigma
        kl_loss = kl_loss - backend.square(
            z_mean) - backend.exp(
            z_log_sigma
        )
        kl_loss = backend.sum(
            kl_loss,
            axis=-1)
        kl_loss *= -0.5
        cost_f = backend.mean(
            reconstruction_loss + kl_loss
        )
        return cost_f

    auto.compile(
        optimizer='adam',
        loss=cost_f
    )
    return encoder, decoder, auto
| [
"1498@holbertonschool.com"
] | 1498@holbertonschool.com |
59916d167f39c2a441b8c66f11211e9e2010d2c8 | 60cdd2f763e8ebd19eae1392a1533ce889123ba2 | /main.py | 10661e422cb540c411bf2c3e0779cff557d333e9 | [] | no_license | shohei/logic_analyzer | 79fd7d88be421cac7989369ef640e51e548b9f1a | 2d662d9e81aacd1c81b0a5d389c891b7ebbca466 | refs/heads/master | 2020-05-21T10:11:58.469328 | 2017-04-05T14:03:42 | 2017-04-05T14:03:42 | 69,441,097 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,718 | py | #!/usr/bin/env python
#-*- coding: utf-8 -*-
from ctypes import *
from numpy import *
from dwfconstants import *
import math
import sys
import matplotlib.pyplot as plt
import pdb
from decoder import decodemap
if __name__=="__main__":
    # Logic-analyzer capture loop for a Digilent device (WaveForms SDK via
    # ctypes): generates test pulses on the digital outputs, records 16-bit
    # digital-in samples, decodes them through `decodemap` into x/y/z step
    # counts and appends the running totals to record.csv.
    f = open("record.csv", "w")
    # Load the platform-specific WaveForms (dwf) shared library.
    if sys.platform.startswith("win"):
        dwf = cdll.dwf
    elif sys.platform.startswith("darwin"):
        dwf = cdll.LoadLibrary("/Library/Frameworks/dwf.framework/dwf")
    else:
        dwf = cdll.LoadLibrary("libdwf.so")
    #declare ctype variables
    hdwf = c_int()
    sts = c_byte()
    #print DWF version
    version = create_string_buffer(16)
    dwf.FDwfGetVersion(version)
    print ("DWF Version: "+version.value)
    #open device
    print ("Opening first device")
    dwf.FDwfDeviceOpen(c_int(-1), byref(hdwf))
    if hdwf.value == hdwfNone.value:
        print ("failed to open device")
        quit()
    print ("Configuring Digital Out / In...")
    # generate counter
    # generate on DIO-0 1MHz pulse (100MHz/25/(3+1)), 25% duty (3low 1high)
    #1kHz
    # dwf.FDwfDigitalOutEnableSet(hdwf, c_int(i), c_int(1))
    # dwf.FDwfDigitalOutDividerSet(hdwf, c_int(i), c_int(25))
    # dwf.FDwfDigitalOutCounterSet(hdwf, c_int(i), c_int(3), c_int(1))
    # for i in range(0, 16):
    # DIO-0: 1 kHz pulse, 25% duty (3 low / 1 high counts).
    for i in range(0, 1):
        dwf.FDwfDigitalOutEnableSet(hdwf, c_int(i), c_int(1))
        dwf.FDwfDigitalOutDividerSet(hdwf, c_int(i), c_int(25*1000)) #1MHz -> 1kHz
        dwf.FDwfDigitalOutCounterSet(hdwf, c_int(i), c_int(3), c_int(1))
    # DIO-2..14: same divider, held constant (counter 4 low / 0 high).
    for i in range(2, 15):
        dwf.FDwfDigitalOutEnableSet(hdwf, c_int(i), c_int(1))
        dwf.FDwfDigitalOutDividerSet(hdwf, c_int(i), c_int(25*1000)) #1MHz -> 1kHz
        dwf.FDwfDigitalOutCounterSet(hdwf, c_int(i), c_int(4), c_int(0))
    dwf.FDwfDigitalOutConfigure(hdwf, c_int(1))
    # set number of sample to acquire
    nSamples = 1000
    # nSamples = 1000
    rgwSamples = (c_uint16*nSamples)()
    cAvailable = c_int()
    cLost = c_int()
    cCorrupted = c_int()
    cSamples = 0
    fLost = 0
    fCorrupted = 0
    # in record mode samples after trigger are acquired only
    # dwf.FDwfDigitalInAcquisitionModeSet(hdwf, acqmodeRecord)
    dwf.FDwfDigitalInAcquisitionModeSet(hdwf, acqmodeScanScreen)
    # sample rate = system frequency / divider, 100MHz/1000 = 100kHz
    dwf.FDwfDigitalInDividerSet(hdwf, c_int(1*100*10)) #10kHz
    # 16bit per sample format
    dwf.FDwfDigitalInSampleFormatSet(hdwf, c_int(16))
    # number of samples after trigger
    # dwf.FDwfDigitalInTriggerPositionSet(hdwf, c_int(nSamples))
    # trigger when all digital pins are low
    # dwf.FDwfDigitalInTriggerSourceSet(hdwf, trigsrcDetectorDigitalIn)
    # trigger detector mask: low & hight & ( rising | falling )
    # dwf.FDwfDigitalInTriggerSet(hdwf, c_int(0xFFFF), c_int(0), c_int(0), c_int(0))
    # apply a low-voltage trigger across all 16 pins
    # dwf.FDwfDigitalInTriggerSet(hdwf, c_int(0xFFFF), c_int(0), c_int(0), c_int(0))
    # begin acquisition
    dwf.FDwfDigitalInConfigure(hdwf, c_bool(0), c_bool(1))
    print ("Starting record")
    # Interactive matplotlib figure kept for optional live plotting below.
    plt.ion()
    fig = plt.figure() # Create figure
    axes = fig.add_subplot(111) # Add subplot (dont worry only one plot appears)
    axes.set_autoscale_on(True) # enable autoscale
    axes.autoscale_view(True,True,True)
    # axes.autoscale_view(True,True,True)
    hl, = plt.plot([], [])
    hl.set_xdata(range(0,len(rgwSamples)))
    # current_range = 0
    # while cSamples < nSamples:
    # Running decoded totals written to record.csv per full sample buffer.
    x = 0
    y = 0
    z = 0
    while True:
        if(cSamples == nSamples):
            # Buffer full: decode every raw 16-bit sample into x/y/z
            # increments and persist the running totals, then reset.
            # current_range += len(rgwSamples)
            # hl.set_xdata(range(current_range,current_range+nSamples))
            # axes.relim() # Recalculate limits
            # axes.autoscale_view(True,True,True) #Autoscale
            # plt.draw()
            # plt.pause(0.01)
            for v in rgwSamples:
                hexa = int(v)
                x += decodemap.ix[hexa,"x"]
                y += decodemap.ix[hexa,"y"]
                z += decodemap.ix[hexa,"z"]
            f.write("%d %d %d\n" % (x,y,z))
            rgwSamples = (c_uint16*nSamples)()
            cSamples = 0
        dwf.FDwfDigitalInStatus(hdwf, c_int(1), byref(sts))
        if cSamples == 0 and (sts == DwfStateConfig or sts == DwfStatePrefill or sts == DwfStateArmed) :
            # acquisition not yet started.
            continue
        dwf.FDwfDigitalInStatusRecord(hdwf, byref(cAvailable), byref(cLost), byref(cCorrupted))
        # Lost samples still advance the sample counter so buffer offsets stay aligned.
        cSamples += cLost.value
        if cLost.value:
            fLost = 1
            print ("Samples were lost! Reduce sample rate")
        if cCorrupted.value:
            print ("Samples could be corrupted! Reduce sample rate")
            fCorrupted = 1
        if cAvailable.value==0 :
            continue
        if cSamples+cAvailable.value > nSamples :
            # Clamp so the copy below never overruns the fixed-size buffer.
            cAvailable = c_int(nSamples-cSamples)
        # Copy available samples into the buffer at the current offset
        # (offsets are in bytes: 2 bytes per 16-bit sample).
        dwf.FDwfDigitalInStatusData(hdwf, byref(rgwSamples, 2*cSamples), c_int(2*cAvailable.value))
        # print cAvailable.value
        cSamples += cAvailable.value
        # total_pulse += len((nonzero(rgwSamples))[0])
        # hl.set_ydata(rgwSamples)
        # axes.relim() # Recalculate limits
        # axes.autoscale_view(True,True,True) #Autoscale
        # plt.draw()
        # plt.pause(0.01)
    #never reached
    dwf.FDwfDeviceClose(hdwf)
    f.close()
| [
"shoaok@gmail.com"
] | shoaok@gmail.com |
83022f241ae32a26138b5364a2a014ca419a787f | 84a96dbd96e926ebb5c658e3cb897db276c32d6c | /tensorflow/tools/docs/pretty_docs.py | 98b5c7a3b397c98742124da5989fbb2e34fe8b9b | [
"Apache-2.0"
] | permissive | MothCreations/gavlanWheels | bc9189092847369ad291d1c7d3f4144dd2239359 | 01d8a43b45a26afec27b971f686f79c108fe08f9 | refs/heads/master | 2022-12-06T09:27:49.458800 | 2020-10-13T21:56:40 | 2020-10-13T21:56:40 | 249,206,716 | 6 | 5 | Apache-2.0 | 2022-11-21T22:39:47 | 2020-03-22T14:57:45 | C++ | UTF-8 | Python | false | false | 10,101 | py | # Lint as: python2, python3
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A module for converting parsed doc content into markdown pages.
The adjacent `parser` module creates `PageInfo` objects, containing all data
necessary to document an element of the TensorFlow API.
This module contains one public function, which handels the conversion of these
`PageInfo` objects into a markdown string:
md_page = build_md_page(page_info)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import textwrap
import six
def build_md_page(page_info):
  """Given a PageInfo object, return markdown for the page.

  Args:
    page_info: must be a `parser.FunctionPageInfo`, `parser.ClassPageInfo`, or
      `parser.ModulePageInfo`

  Returns:
    Markdown for the page

  Raises:
    ValueError: if `page_info` is an instance of an unrecognized class
  """
  builders = (
      (page_info.for_function, _build_function_page),
      (page_info.for_class, _build_class_page),
      (page_info.for_module, _build_module_page),
  )
  for predicate, builder in builders:
    if predicate():
      return builder(page_info)
  raise ValueError('Unknown Page Info Type: %s' % type(page_info))
def _build_function_page(page_info):
  """Render a `FunctionPageInfo` as a markdown page string."""
  parts = ['# %s\n\n' % page_info.full_name,
           _build_aliases(page_info.aliases)]
  if page_info.signature is not None:
    parts.append(_build_signature(page_info))
  if page_info.defined_in:
    parts.extend(['\n\n', str(page_info.defined_in)])
  parts.extend([
      page_info.guides,
      page_info.doc.docstring,
      _build_function_details(page_info.doc.function_details),
      _build_compatibility(page_info.doc.compatibility),
  ])
  return ''.join(parts)
def _build_class_page(page_info):
  """Given a ClassPageInfo object, return the class page as an md string."""
  # Sections are appended in fixed order: title, bases, aliases, source
  # location, docstring, constructors, child classes, properties, methods,
  # class members.
  parts = ['# {page_info.full_name}\n\n'.format(page_info=page_info)]
  parts.append('## Class `%s`\n\n' %
               six.ensure_str(page_info.full_name).split('.')[-1])
  if page_info.bases:
    parts.append('Inherits From: ')
    link_template = '[`{short_name}`]({url})'
    parts.append(', '.join(
        link_template.format(**base._asdict()) for base in page_info.bases))
    parts.append('\n\n')
  # Sort the methods list, but make sure constructors come first.
  constructor_names = ['__init__', '__new__']
  constructors = sorted(
      method for method in page_info.methods
      if method.short_name in constructor_names)
  other_methods = sorted(
      method for method in page_info.methods
      if method.short_name not in constructor_names)
  parts.append(_build_aliases(page_info.aliases))
  if page_info.defined_in is not None:
    parts.append('\n\n')
    parts.append(str(page_info.defined_in))
  parts.append(page_info.guides)
  parts.append(page_info.doc.docstring)
  parts.append(_build_function_details(page_info.doc.function_details))
  parts.append(_build_compatibility(page_info.doc.compatibility))
  parts.append('\n\n')
  if constructors:
    # Constructors get a more prominent heading (h2) than other methods.
    for method_info in constructors:
      parts.append(_build_method_section(method_info, heading_level=2))
    parts.append('\n\n')
  if page_info.classes:
    parts.append('## Child Classes\n')
    link_template = ('[`class {class_info.short_name}`]'
                     '({class_info.url})\n\n')
    class_links = sorted(
        link_template.format(class_info=class_info)
        for class_info in page_info.classes)
    parts.extend(class_links)
  if page_info.properties:
    parts.append('## Properties\n\n')
    for prop_info in page_info.properties:
      h3 = '<h3 id="{short_name}"><code>{short_name}</code></h3>\n\n'
      parts.append(h3.format(short_name=prop_info.short_name))
      parts.append(prop_info.doc.docstring)
      parts.append(_build_function_details(prop_info.doc.function_details))
      parts.append(_build_compatibility(prop_info.doc.compatibility))
      parts.append('\n\n')
    parts.append('\n\n')
  if other_methods:
    parts.append('## Methods\n\n')
    for method_info in other_methods:
      parts.append(_build_method_section(method_info))
    parts.append('\n\n')
  if page_info.other_members:
    parts.append('## Class Members\n\n')
    # TODO(markdaoust): Document the value of the members,
    # at least for basic types.
    h3 = '<h3 id="{short_name}"><code>{short_name}</code></h3>\n\n'
    others_member_headings = (h3.format(short_name=info.short_name)
                              for info in sorted(page_info.other_members))
    parts.extend(others_member_headings)
  return ''.join(parts)
def _build_method_section(method_info, heading_level=3):
  """Generates a markdown section for a method.

  Args:
    method_info: A `MethodInfo` object.
    heading_level: An Int, which HTML heading level to use.

  Returns:
    A markdown string.
  """
  heading_template = ('<h{heading_level} id="{short_name}">'
                      '<code>{short_name}</code>'
                      '</h{heading_level}>\n\n')
  pieces = [heading_template.format(heading_level=heading_level,
                                    **method_info._asdict())]
  if method_info.signature is not None:
    pieces.append(_build_signature(method_info, use_full_name=False))
  pieces.extend([
      method_info.doc.docstring,
      _build_function_details(method_info.doc.function_details),
      _build_compatibility(method_info.doc.compatibility),
      '\n\n',
  ])
  return ''.join(pieces)
def _build_module_page(page_info):
  """Given a ModulePageInfo object, return the module page as an md string."""
  parts = ['# Module: {full_name}\n\n'.format(full_name=page_info.full_name)]
  parts.append(_build_aliases(page_info.aliases))
  if page_info.defined_in is not None:
    parts.append('\n\n')
    parts.append(str(page_info.defined_in))
  parts.append(page_info.doc.docstring)
  parts.append(_build_compatibility(page_info.doc.compatibility))
  parts.append('\n\n')
  # Each member section (modules, classes, functions) is emitted only when
  # non-empty, as a heading followed by one link line per member.
  if page_info.modules:
    parts.append('## Modules\n\n')
    template = '[`{short_name}`]({url}) module'
    for item in page_info.modules:
      parts.append(template.format(**item._asdict()))
      if item.doc.brief:
        parts.append(': ' + six.ensure_str(item.doc.brief))
      parts.append('\n\n')
  if page_info.classes:
    parts.append('## Classes\n\n')
    template = '[`class {short_name}`]({url})'
    for item in page_info.classes:
      parts.append(template.format(**item._asdict()))
      if item.doc.brief:
        parts.append(': ' + six.ensure_str(item.doc.brief))
      parts.append('\n\n')
  if page_info.functions:
    parts.append('## Functions\n\n')
    template = '[`{short_name}(...)`]({url})'
    for item in page_info.functions:
      parts.append(template.format(**item._asdict()))
      if item.doc.brief:
        parts.append(': ' + six.ensure_str(item.doc.brief))
      parts.append('\n\n')
  if page_info.other_members:
    # TODO(markdaoust): Document the value of the members,
    # at least for basic types.
    parts.append('## Other Members\n\n')
    h3 = '<h3 id="{short_name}"><code>{short_name}</code></h3>\n\n'
    for item in page_info.other_members:
      parts.append(h3.format(**item._asdict()))
  return ''.join(parts)
def _build_signature(obj_info, use_full_name=True):
"""Returns a md code block showing the function signature."""
# Special case tf.range, since it has an optional first argument
if obj_info.full_name == 'tf.range':
return (
'``` python\n'
"tf.range(limit, delta=1, dtype=None, name='range')\n"
"tf.range(start, limit, delta=1, dtype=None, name='range')\n"
'```\n\n')
parts = ['``` python']
parts.extend(['@' + six.ensure_str(dec) for dec in obj_info.decorators])
signature_template = '{name}({sig})'
if not obj_info.signature:
sig = ''
elif len(obj_info.signature) == 1:
sig = obj_info.signature[0]
else:
sig = ',\n'.join(' %s' % sig_item for sig_item in obj_info.signature)
sig = '\n'+sig+'\n'
if use_full_name:
obj_name = obj_info.full_name
else:
obj_name = obj_info.short_name
parts.append(signature_template.format(name=obj_name, sig=sig))
parts.append('```\n\n')
return '\n'.join(parts)
def _build_compatibility(compatibility):
"""Return the compatibility section as an md string."""
parts = []
sorted_keys = sorted(compatibility.keys())
for key in sorted_keys:
value = compatibility[key]
# Dedent so that it does not trigger markdown code formatting.
value = textwrap.dedent(value)
parts.append('\n\n#### %s Compatibility\n%s\n' % (key.title(), value))
return ''.join(parts)
def _build_function_details(function_details):
"""Return the function details section as an md string."""
parts = []
for detail in function_details:
sub = []
sub.append('#### ' + six.ensure_str(detail.keyword) + ':\n\n')
sub.append(textwrap.dedent(detail.header))
for key, value in detail.items:
sub.append('* <b>`%s`</b>: %s' % (key, value))
parts.append(''.join(sub))
return '\n'.join(parts)
def _build_aliases(aliases):
aliases = sorted(aliases, key=lambda x: ('compat.v' in x, x))
parts = []
if len(aliases) > 1:
parts.append('**Aliases**: ')
parts.extend(', '.join('`{}`'.format(name) for name in aliases))
parts.append('\n\n')
return ''.join(parts)
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
2505e320f99f41f3496e7a095506b46a630e9e81 | e1a7d00dbe27403427078c627ccebe1562a6049d | /mercury/plugin/client/activity_window.py | 800a6df0ee4cca37dbc125cb1e0e71b51922cb5f | [
"Apache-2.0"
] | permissive | greenlsi/mercury_mso_framework | f24fc167230057bb07b7de5dc9fbb10490293fee | cb425605de3341d27ce43fb326b300cb8ac781f6 | refs/heads/master | 2023-04-28T02:18:16.362823 | 2023-04-18T12:03:23 | 2023-04-18T12:03:23 | 212,610,400 | 2 | 1 | Apache-2.0 | 2023-03-02T14:36:56 | 2019-10-03T15:12:32 | Python | UTF-8 | Python | false | false | 731 | py | from ..common.event_generator import *
class SrvActivityWindowGenerator(EventGenerator[None], ABC):
    """Abstract base for generators of service activity window events."""


class ConstantSrvWindowGenerator(SrvActivityWindowGenerator, PeriodicGenerator[None]):
    """Fixed-length windows: the 'length' option becomes the generator period."""
    def __init__(self, **kwargs):
        super().__init__(**dict(kwargs, period=kwargs['length']))


class UniformSrvWindowGenerator(SrvActivityWindowGenerator, UniformDistributionGenerator[None]):
    """Window lengths drawn from a uniform distribution."""


class GaussianSrvWindowGenerator(SrvActivityWindowGenerator, GaussianDistributionGenerator[None]):
    """Window lengths drawn from a Gaussian distribution."""


class ExponentialSrvWindowGenerator(SrvActivityWindowGenerator, ExponentialDistributionGenerator[None]):
    """Window lengths drawn from an exponential distribution."""


class LambdaSrvSessionDuration(LambdaDrivenGenerator[None]):
    """Session durations produced by a user-supplied callable."""
| [
"rcardenas.rod@gmail.com"
] | rcardenas.rod@gmail.com |
5cb11f9692234a96b5d785b845b15d74b1f63c91 | 52b6508d4f6f38f068b27c414970aa21460a7b25 | /terraform_validator/custom_rules/ManagedPolicyOnUserRule.py | 713d17af796bee5e9815401961f5155ef3e1b3c0 | [
"MIT"
] | permissive | rubelw/terraform-validator | 97698751ed828e54b4257a378c2dd21c5ec9bf24 | a9d0335a532acdb4070e5537155b03b34915b73e | refs/heads/master | 2020-03-31T04:09:22.435690 | 2018-10-22T03:24:40 | 2018-10-22T03:24:40 | 151,893,251 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,921 | py | from __future__ import absolute_import, division, print_function
import inspect
import sys
from builtins import (str)
from terraform_validator.custom_rules.BaseRule import BaseRule
def lineno():
    """Return a diagnostic tag naming the calling function and its line number."""
    caller = inspect.currentframe().f_back
    return ' - ManagedPolicyOnUserRule - caller: {0} - line number: {1}'.format(
        caller.f_code.co_name, caller.f_lineno)
class ManagedPolicyOnUserRule(BaseRule):
    """Flags AWS::IAM::ManagedPolicy resources attached directly to users."""
    def __init__(self, cfn_model=None, debug=None):
        """
        Initialize
        :param cfn_model: parsed CloudFormation model to audit
        """
        BaseRule.__init__(self, cfn_model, debug=debug)

    def rule_text(self):
        """
        Get rule text
        :return: human-readable description of the violation
        """
        if self.debug:
            print('rule_text'+lineno())
        return 'IAM managed policy should not apply directly to users. Should be on group'

    def rule_type(self):
        """
        Get rule type
        :return: failing-violation type marker (also cached on self.type)
        """
        self.type= 'VIOLATION::FAILING_VIOLATION'
        return 'VIOLATION::FAILING_VIOLATION'

    def rule_id(self):
        """
        Get rule id
        :return: rule identifier (also cached on self.id)
        """
        if self.debug:
            print('rule_id'+lineno())
        self.id ='F12'
        return 'F12'

    def audit_impl(self):
        """
        Audit all AWS::IAM::ManagedPolicy resources in the model.
        :return: logical ids of policies with a non-empty Users list
        """
        if self.debug:
            print('ManagedPolicyOnUserRule - audit_impl'+lineno())
        violating_policies = []
        resources= self.cfn_model.resources_by_type('AWS::IAM::ManagedPolicy')
        if len(resources)>0:
            for resource in resources:
                if self.debug:
                    print('resource: '+str(resource)+lineno())
                # A policy violates the rule when it is attached to one or
                # more users (it should be attached to a group instead).
                if hasattr(resource,'users'):
                    if resource.users:
                        if self.debug:
                            print('users: '+str(resource.users))
                        if len(resource.users)>0:
                            violating_policies.append(str(resource.logical_resource_id))
        else:
            if self.debug:
                print('no violating_policies' + lineno())
        return violating_policies
"rubelwi@Wills-MacBook-Pro.local"
] | rubelwi@Wills-MacBook-Pro.local |
b318fa6c4d7fb0cbd18c7f80899aac561a1d4362 | be50b4dd0b5b8c3813b8c3158332b1154fe8fe62 | /Backtracking/Python/MaximalString.py | 72a24b627e0102f42fd6027473225816f4bd5698 | [] | no_license | Zimmermann25/InterviewBit | a8d89e090068d9644e28085625963c8ce75d3dff | 6d2138e740bd5ba8eab992d9bf090977e077bfc5 | refs/heads/main | 2023-03-24T18:12:48.244950 | 2021-03-24T14:36:48 | 2021-03-24T14:36:48 | 350,835,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,179 | py | class Solution:
# @param A : string
# @param B : integer
# @return a strings
#backtrack naciagany na siłe, w cpp dla ponizszych danych program sie wywala
# a tutaj iteracyjnie wszystko git, ale nie zalicza rozwiązania
#10343456789765432457689065543876
#10
# w c++ backtracking z komentarza jest uznawany, choć dla długości A ~kilkanaści juz się
#wysypuje xdddddddd, tu działa poprawnie i nie jest uznawany :(
def solve(self, A, B):
arr = list(A)
counter = 0
i = 0
while i < len(arr):# tak jak w selection sort, ale tutaj maks B swapów
maxIndex = i
k = i+1
#print("A[maxIndex]: ", arr[i], "arr: ", arr, "i: ", i)
while k < len(arr):
if arr[k] > arr[maxIndex]: # co z >=??
maxIndex = k
if arr[k]==9:break
k+=1
if maxIndex!=i:#sprawdz, czy wykonac swap
arr[i], arr[maxIndex] = arr[maxIndex], arr[i]
counter +=1
if counter ==B:break
i +=1
return "".join(arr) | [
"noreply@github.com"
] | Zimmermann25.noreply@github.com |
4edd0fec655bb06ff52b8a23179b60a8687a9592 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/dictat.py | ff1aad7e5bdcfe0547e68b143e7655fd1b342ebe | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 2,143 | py | ii = [('BentJDO2.py', 28), ('CookGHP3.py', 12), ('MarrFDI.py', 3), ('RogePAV2.py', 1), ('CoolWHM2.py', 1), ('GodwWSL2.py', 13), ('ChanWS.py', 2), ('SadlMLP.py', 37), ('WilbRLW.py', 9), ('WilbRLW4.py', 8), ('ProuWCM.py', 2), ('AubePRP2.py', 8), ('CookGHP.py', 12), ('ShawHDE.py', 1), ('MartHSI2.py', 3), ('KembFJ1.py', 1), ('WilbRLW5.py', 7), ('LeakWTI3.py', 2), ('MarrFDI3.py', 1), ('PeckJNG.py', 1), ('BailJD2.py', 1), ('AubePRP.py', 3), ('ChalTPW2.py', 4), ('AdamWEP.py', 3), ('WilbRLW2.py', 8), ('ClarGE2.py', 5), ('WilkJMC2.py', 6), ('CarlTFR.py', 3), ('SeniNSP.py', 1), ('LyttELD.py', 4), ('CoopJBT2.py', 3), ('TalfTAC.py', 1), ('GrimSLE.py', 2), ('RoscTTI3.py', 5), ('AinsWRR3.py', 1), ('CookGHP2.py', 16), ('RoscTTI2.py', 3), ('CoolWHM.py', 2), ('MarrFDI2.py', 6), ('CrokTPS.py', 2), ('ClarGE.py', 16), ('LandWPA.py', 1), ('BuckWGM.py', 2), ('IrviWVD.py', 3), ('GilmCRS.py', 3), ('DaltJMA.py', 2), ('DibdTRL2.py', 6), ('AinsWRR.py', 1), ('CrocDNL.py', 3), ('MedwTAI.py', 3), ('LandWPA2.py', 4), ('WadeJEB.py', 5), ('FerrSDO2.py', 1), ('NewmJLP.py', 2), ('GodwWLN.py', 8), ('CoopJBT.py', 2), ('KirbWPW2.py', 2), ('SoutRD2.py', 1), ('LeakWTI.py', 2), ('MedwTAI2.py', 3), ('BachARE.py', 18), ('SoutRD.py', 1), ('BuckWGM2.py', 1), ('WheeJPT.py', 5), ('MereHHB3.py', 1), ('HowiWRL2.py', 3), ('MereHHB.py', 2), ('WilkJMC.py', 4), ('HogaGMM.py', 4), ('MartHRW.py', 1), ('MackCNH.py', 1), ('BabbCEM.py', 2), ('FitzRNS4.py', 2), ('CoolWHM3.py', 4), ('BentJRP.py', 7), ('LyttELD3.py', 1), ('FerrSDO.py', 
2), ('RoscTTI.py', 1), ('ThomGLG.py', 1), ('StorJCC.py', 17), ('KembFJ2.py', 1), ('LewiMJW.py', 3), ('BabbCRD.py', 3), ('MackCNH2.py', 4), ('JacoWHI2.py', 3), ('HaliTBC.py', 10), ('WilbRLW3.py', 5), ('AinsWRR2.py', 1), ('MereHHB2.py', 1), ('JacoWHI.py', 2), ('ClarGE3.py', 15), ('RogeSIP.py', 2), ('DibdTRL.py', 3), ('FitzRNS2.py', 4), ('MartHSI.py', 2), ('EvarJSP.py', 15), ('DwigTHH.py', 5), ('NortSTC.py', 1), ('SadlMLP2.py', 15), ('BeckWRE.py', 1), ('TaylIF.py', 2), ('WordWYR.py', 1), ('ChalTPW.py', 6), ('ThomWEC.py', 5), ('KeigTSS.py', 2), ('KirbWPW.py', 1), ('WaylFEP.py', 3), ('BentJDO.py', 22), ('ClarGE4.py', 5), ('AdamJOA.py', 2)] | [
"prabhjyotsingh95@gmail.com"
] | prabhjyotsingh95@gmail.com |
187c493ffee7eea9d2b9be959e9c3f10767c80e0 | b8755b5f0b5a3c1bba1270cc8f20dc172abb0634 | /django_data/jobs/forms.py | ac34414120aaf22d30e0ee7da22da786f158b801 | [
"Apache-2.0"
] | permissive | AndersonHJB/Django_Leraning | bf44af05b0e604342fd97cb8699385461cbbb965 | 95c34057f643b234478e72665c6454ebd99cb6cd | refs/heads/main | 2023-07-12T02:47:34.289089 | 2021-08-15T10:17:32 | 2021-08-15T10:17:32 | 367,765,444 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | from django.forms import ModelForm
from .models import Resume
class ResumeForm(ModelForm):
    """Django ModelForm for creating or editing a candidate's ``Resume``.

    Exposes the contact, application, education and experience fields of the
    ``Resume`` model (imported from ``.models``); any other model fields are
    excluded from the form.
    """

    class Meta:
        # Model the form fields are generated from.
        model = Resume
        # Explicit whitelist of editable fields, in display order.
        fields = ["username", "city", "phone",
                  "email", "apply_position", "born_address", "gender", "picture", "attachment",
                  "bachelor_school", "master_school", "major", "degree",
                  "candidate_introduction", "work_experience", "project_experience"]
"1432803776@qq.com"
] | 1432803776@qq.com |
dfb51f5c1ad3a8dd2cb83848b9a4e250be99093c | a86ae168b77fa5bfd0b24a871ae2d010af4cc121 | /myenv/easyjob/employee/migrations/0005_auto_20180613_1241.py | de71fb456febb3c085d151a2f6dfbb8d7725dbe2 | [] | no_license | sujanbajracharya1921/easyjob | 15c94636701151bb1b58948d9b17e94ccdfa5b6a | cc9066765517cb0b5f691aeff765dbda69a09870 | refs/heads/master | 2022-11-08T06:23:27.144778 | 2019-03-27T15:55:01 | 2019-03-27T15:55:01 | 150,414,578 | 1 | 2 | null | 2022-10-29T01:53:57 | 2018-09-26T11:16:40 | Python | UTF-8 | Python | false | false | 414 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated migration: make the ``skill`` field unique.

    Alters ``skill.skill`` to ``CharField(max_length=30, unique=True)``,
    which adds a database-level uniqueness constraint on skill names.
    """

    # Must be applied after the previous employee migration.
    dependencies = [
        ('employee', '0004_auto_20180606_1101'),
    ]

    operations = [
        migrations.AlterField(
            model_name='skill',
            name='skill',
            field=models.CharField(max_length=30, unique=True),
        ),
    ]
| [
"you@example.com"
] | you@example.com |
9845b324350947ec6c3ec0b9803612d0eee0497a | 8f75dae40363144b7ea0eccb1b2fab804ee60711 | /tests/integration/goldens/logging/google/cloud/logging_v2/types/logging.py | a4610dd59de0198b036b93249336faa6ed4d9b78 | [
"Apache-2.0"
] | permissive | software-dov/gapic-generator-python | a2298c13b02bff87888c2949f4909880c3fa2408 | 304b30d3b4ec9ccb730251154b10896146a52900 | refs/heads/master | 2022-06-04T00:14:28.559534 | 2022-02-28T18:13:26 | 2022-02-28T18:13:26 | 191,990,527 | 0 | 1 | Apache-2.0 | 2022-01-27T19:35:04 | 2019-06-14T18:41:06 | Python | UTF-8 | Python | false | false | 21,868 | py | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.api import monitored_resource_pb2 # type: ignore
from google.cloud.logging_v2.types import log_entry
from google.protobuf import duration_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.logging.v2',
manifest={
'DeleteLogRequest',
'WriteLogEntriesRequest',
'WriteLogEntriesResponse',
'WriteLogEntriesPartialErrors',
'ListLogEntriesRequest',
'ListLogEntriesResponse',
'ListMonitoredResourceDescriptorsRequest',
'ListMonitoredResourceDescriptorsResponse',
'ListLogsRequest',
'ListLogsResponse',
'TailLogEntriesRequest',
'TailLogEntriesResponse',
},
)
class DeleteLogRequest(proto.Message):
r"""The parameters to DeleteLog.
Attributes:
log_name (str):
Required. The resource name of the log to delete:
::
"projects/[PROJECT_ID]/logs/[LOG_ID]"
"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]"
"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]"
"folders/[FOLDER_ID]/logs/[LOG_ID]"
``[LOG_ID]`` must be URL-encoded. For example,
``"projects/my-project-id/logs/syslog"``,
``"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity"``.
For more information about log names, see
[LogEntry][google.logging.v2.LogEntry].
"""
log_name = proto.Field(
proto.STRING,
number=1,
)
class WriteLogEntriesRequest(proto.Message):
r"""The parameters to WriteLogEntries.
Attributes:
log_name (str):
Optional. A default log resource name that is assigned to
all log entries in ``entries`` that do not specify a value
for ``log_name``:
::
"projects/[PROJECT_ID]/logs/[LOG_ID]"
"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]"
"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]"
"folders/[FOLDER_ID]/logs/[LOG_ID]"
``[LOG_ID]`` must be URL-encoded. For example:
::
"projects/my-project-id/logs/syslog"
"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity"
The permission ``logging.logEntries.create`` is needed on
each project, organization, billing account, or folder that
is receiving new log entries, whether the resource is
specified in ``logName`` or in an individual log entry.
resource (google.api.monitored_resource_pb2.MonitoredResource):
Optional. A default monitored resource object that is
assigned to all log entries in ``entries`` that do not
specify a value for ``resource``. Example:
::
{ "type": "gce_instance",
"labels": {
"zone": "us-central1-a", "instance_id": "00000000000000000000" }}
See [LogEntry][google.logging.v2.LogEntry].
labels (Sequence[google.cloud.logging_v2.types.WriteLogEntriesRequest.LabelsEntry]):
Optional. Default labels that are added to the ``labels``
field of all log entries in ``entries``. If a log entry
already has a label with the same key as a label in this
parameter, then the log entry's label is not changed. See
[LogEntry][google.logging.v2.LogEntry].
entries (Sequence[google.cloud.logging_v2.types.LogEntry]):
Required. The log entries to send to Logging. The order of
log entries in this list does not matter. Values supplied in
this method's ``log_name``, ``resource``, and ``labels``
fields are copied into those log entries in this list that
do not include values for their corresponding fields. For
more information, see the
[LogEntry][google.logging.v2.LogEntry] type.
If the ``timestamp`` or ``insert_id`` fields are missing in
log entries, then this method supplies the current time or a
unique identifier, respectively. The supplied values are
chosen so that, among the log entries that did not supply
their own values, the entries earlier in the list will sort
before the entries later in the list. See the
``entries.list`` method.
Log entries with timestamps that are more than the `logs
retention
period <https://cloud.google.com/logging/quota-policy>`__ in
the past or more than 24 hours in the future will not be
available when calling ``entries.list``. However, those log
entries can still be `exported with
LogSinks <https://cloud.google.com/logging/docs/api/tasks/exporting-logs>`__.
To improve throughput and to avoid exceeding the `quota
limit <https://cloud.google.com/logging/quota-policy>`__ for
calls to ``entries.write``, you should try to include
several log entries in this list, rather than calling this
method for each individual log entry.
partial_success (bool):
Optional. Whether valid entries should be written even if
some other entries fail due to INVALID_ARGUMENT or
PERMISSION_DENIED errors. If any entry is not written, then
the response status is the error associated with one of the
failed entries and the response includes error details keyed
by the entries' zero-based index in the ``entries.write``
method.
dry_run (bool):
Optional. If true, the request should expect
normal response, but the entries won't be
persisted nor exported. Useful for checking
whether the logging API endpoints are working
properly before sending valuable data.
"""
log_name = proto.Field(
proto.STRING,
number=1,
)
resource = proto.Field(
proto.MESSAGE,
number=2,
message=monitored_resource_pb2.MonitoredResource,
)
labels = proto.MapField(
proto.STRING,
proto.STRING,
number=3,
)
entries = proto.RepeatedField(
proto.MESSAGE,
number=4,
message=log_entry.LogEntry,
)
partial_success = proto.Field(
proto.BOOL,
number=5,
)
dry_run = proto.Field(
proto.BOOL,
number=6,
)
class WriteLogEntriesResponse(proto.Message):
    r"""Result returned from WriteLogEntries.

    Intentionally declares no fields: a successful WriteLogEntries call
    carries no payload.  Per-entry failures are reported separately (see
    ``WriteLogEntriesPartialErrors`` in this module).
    """
class WriteLogEntriesPartialErrors(proto.Message):
r"""Error details for WriteLogEntries with partial success.
Attributes:
log_entry_errors (Sequence[google.cloud.logging_v2.types.WriteLogEntriesPartialErrors.LogEntryErrorsEntry]):
When ``WriteLogEntriesRequest.partial_success`` is true,
records the error status for entries that were not written
due to a permanent error, keyed by the entry's zero-based
index in ``WriteLogEntriesRequest.entries``.
Failed requests for which no entries are written will not
include per-entry errors.
"""
log_entry_errors = proto.MapField(
proto.INT32,
proto.MESSAGE,
number=1,
message=status_pb2.Status,
)
class ListLogEntriesRequest(proto.Message):
r"""The parameters to ``ListLogEntries``.
Attributes:
resource_names (Sequence[str]):
Required. Names of one or more parent resources from which
to retrieve log entries:
::
"projects/[PROJECT_ID]"
"organizations/[ORGANIZATION_ID]"
"billingAccounts/[BILLING_ACCOUNT_ID]"
"folders/[FOLDER_ID]"
May alternatively be one or more views
projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]
organization/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]
billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]
folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]
Projects listed in the ``project_ids`` field are added to
this list.
filter (str):
Optional. A filter that chooses which log entries to return.
See `Advanced Logs
Queries <https://cloud.google.com/logging/docs/view/advanced-queries>`__.
Only log entries that match the filter are returned. An
empty filter matches all log entries in the resources listed
in ``resource_names``. Referencing a parent resource that is
not listed in ``resource_names`` will cause the filter to
return no results. The maximum length of the filter is 20000
characters.
order_by (str):
Optional. How the results should be sorted. Presently, the
only permitted values are ``"timestamp asc"`` (default) and
``"timestamp desc"``. The first option returns entries in
order of increasing values of ``LogEntry.timestamp`` (oldest
first), and the second option returns entries in order of
decreasing timestamps (newest first). Entries with equal
timestamps are returned in order of their ``insert_id``
values.
page_size (int):
Optional. The maximum number of results to return from this
request. Default is 50. If the value is negative or exceeds
1000, the request is rejected. The presence of
``next_page_token`` in the response indicates that more
results might be available.
page_token (str):
Optional. If present, then retrieve the next batch of
results from the preceding call to this method.
``page_token`` must be the value of ``next_page_token`` from
the previous response. The values of other method parameters
should be identical to those in the previous call.
"""
resource_names = proto.RepeatedField(
proto.STRING,
number=8,
)
filter = proto.Field(
proto.STRING,
number=2,
)
order_by = proto.Field(
proto.STRING,
number=3,
)
page_size = proto.Field(
proto.INT32,
number=4,
)
page_token = proto.Field(
proto.STRING,
number=5,
)
class ListLogEntriesResponse(proto.Message):
r"""Result returned from ``ListLogEntries``.
Attributes:
entries (Sequence[google.cloud.logging_v2.types.LogEntry]):
A list of log entries. If ``entries`` is empty,
``nextPageToken`` may still be returned, indicating that
more entries may exist. See ``nextPageToken`` for more
information.
next_page_token (str):
If there might be more results than those appearing in this
response, then ``nextPageToken`` is included. To get the
next set of results, call this method again using the value
of ``nextPageToken`` as ``pageToken``.
If a value for ``next_page_token`` appears and the
``entries`` field is empty, it means that the search found
no log entries so far but it did not have time to search all
the possible log entries. Retry the method with this value
for ``page_token`` to continue the search. Alternatively,
consider speeding up the search by changing your filter to
specify a single log name or resource type, or to narrow the
time range of the search.
"""
@property
def raw_page(self):
return self
entries = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=log_entry.LogEntry,
)
next_page_token = proto.Field(
proto.STRING,
number=2,
)
class ListMonitoredResourceDescriptorsRequest(proto.Message):
r"""The parameters to ListMonitoredResourceDescriptors
Attributes:
page_size (int):
Optional. The maximum number of results to return from this
request. Non-positive values are ignored. The presence of
``nextPageToken`` in the response indicates that more
results might be available.
page_token (str):
Optional. If present, then retrieve the next batch of
results from the preceding call to this method.
``pageToken`` must be the value of ``nextPageToken`` from
the previous response. The values of other method parameters
should be identical to those in the previous call.
"""
page_size = proto.Field(
proto.INT32,
number=1,
)
page_token = proto.Field(
proto.STRING,
number=2,
)
class ListMonitoredResourceDescriptorsResponse(proto.Message):
r"""Result returned from ListMonitoredResourceDescriptors.
Attributes:
resource_descriptors (Sequence[google.api.monitored_resource_pb2.MonitoredResourceDescriptor]):
A list of resource descriptors.
next_page_token (str):
If there might be more results than those appearing in this
response, then ``nextPageToken`` is included. To get the
next set of results, call this method again using the value
of ``nextPageToken`` as ``pageToken``.
"""
@property
def raw_page(self):
return self
resource_descriptors = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=monitored_resource_pb2.MonitoredResourceDescriptor,
)
next_page_token = proto.Field(
proto.STRING,
number=2,
)
class ListLogsRequest(proto.Message):
r"""The parameters to ListLogs.
Attributes:
parent (str):
Required. The resource name that owns the logs:
::
"projects/[PROJECT_ID]"
"organizations/[ORGANIZATION_ID]"
"billingAccounts/[BILLING_ACCOUNT_ID]"
"folders/[FOLDER_ID]".
page_size (int):
Optional. The maximum number of results to return from this
request. Non-positive values are ignored. The presence of
``nextPageToken`` in the response indicates that more
results might be available.
page_token (str):
Optional. If present, then retrieve the next batch of
results from the preceding call to this method.
``pageToken`` must be the value of ``nextPageToken`` from
the previous response. The values of other method parameters
should be identical to those in the previous call.
resource_names (Sequence[str]):
Optional. The resource name that owns the logs:
projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]
organization/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]
billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]
folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]
To support legacy queries, it could also be:
"projects/[PROJECT_ID]" "organizations/[ORGANIZATION_ID]"
"billingAccounts/[BILLING_ACCOUNT_ID]" "folders/[FOLDER_ID]".
"""
parent = proto.Field(
proto.STRING,
number=1,
)
page_size = proto.Field(
proto.INT32,
number=2,
)
page_token = proto.Field(
proto.STRING,
number=3,
)
resource_names = proto.RepeatedField(
proto.STRING,
number=8,
)
class ListLogsResponse(proto.Message):
r"""Result returned from ListLogs.
Attributes:
log_names (Sequence[str]):
A list of log names. For example,
``"projects/my-project/logs/syslog"`` or
``"organizations/123/logs/cloudresourcemanager.googleapis.com%2Factivity"``.
next_page_token (str):
If there might be more results than those appearing in this
response, then ``nextPageToken`` is included. To get the
next set of results, call this method again using the value
of ``nextPageToken`` as ``pageToken``.
"""
@property
def raw_page(self):
return self
log_names = proto.RepeatedField(
proto.STRING,
number=3,
)
next_page_token = proto.Field(
proto.STRING,
number=2,
)
class TailLogEntriesRequest(proto.Message):
r"""The parameters to ``TailLogEntries``.
Attributes:
resource_names (Sequence[str]):
Required. Name of a parent resource from which to retrieve
log entries:
::
"projects/[PROJECT_ID]"
"organizations/[ORGANIZATION_ID]"
"billingAccounts/[BILLING_ACCOUNT_ID]"
"folders/[FOLDER_ID]"
May alternatively be one or more views:
"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]"
"organization/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]"
"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]"
"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]".
filter (str):
Optional. A filter that chooses which log entries to return.
See `Advanced Logs
Filters <https://cloud.google.com/logging/docs/view/advanced_filters>`__.
Only log entries that match the filter are returned. An
empty filter matches all log entries in the resources listed
in ``resource_names``. Referencing a parent resource that is
not in ``resource_names`` will cause the filter to return no
results. The maximum length of the filter is 20000
characters.
buffer_window (google.protobuf.duration_pb2.Duration):
Optional. The amount of time to buffer log
entries at the server before being returned to
prevent out of order results due to late
arriving log entries. Valid values are between
0-60000 milliseconds. Defaults to 2000
milliseconds.
"""
resource_names = proto.RepeatedField(
proto.STRING,
number=1,
)
filter = proto.Field(
proto.STRING,
number=2,
)
buffer_window = proto.Field(
proto.MESSAGE,
number=3,
message=duration_pb2.Duration,
)
class TailLogEntriesResponse(proto.Message):
r"""Result returned from ``TailLogEntries``.
Attributes:
entries (Sequence[google.cloud.logging_v2.types.LogEntry]):
A list of log entries. Each response in the stream will
order entries with increasing values of
``LogEntry.timestamp``. Ordering is not guaranteed between
separate responses.
suppression_info (Sequence[google.cloud.logging_v2.types.TailLogEntriesResponse.SuppressionInfo]):
If entries that otherwise would have been
included in the session were not sent back to
the client, counts of relevant entries omitted
from the session with the reason that they were
not included. There will be at most one of each
reason per response. The counts represent the
number of suppressed entries since the last
streamed response.
"""
class SuppressionInfo(proto.Message):
r"""Information about entries that were omitted from the session.
Attributes:
reason (google.cloud.logging_v2.types.TailLogEntriesResponse.SuppressionInfo.Reason):
The reason that entries were omitted from the
session.
suppressed_count (int):
A lower bound on the count of entries omitted due to
``reason``.
"""
class Reason(proto.Enum):
r"""An indicator of why entries were omitted."""
REASON_UNSPECIFIED = 0
RATE_LIMIT = 1
NOT_CONSUMED = 2
reason = proto.Field(
proto.ENUM,
number=1,
enum='TailLogEntriesResponse.SuppressionInfo.Reason',
)
suppressed_count = proto.Field(
proto.INT32,
number=2,
)
entries = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=log_entry.LogEntry,
)
suppression_info = proto.RepeatedField(
proto.MESSAGE,
number=2,
message=SuppressionInfo,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"noreply@github.com"
] | software-dov.noreply@github.com |
5d53cc8f424a2e4af644b6cfad96ee5e9faf1337 | e831c22c8834030c22c54b63034e655e395d4efe | /Array/36-ValidSudoku.py | 021ad548ba09cc2b8417a947f7488b2aedd40882 | [] | no_license | szhmery/leetcode | a5eb1a393422b21f9fd4304b3bdc4a9db557858c | 9fcd1ec0686db45d24e2c52a7987d58c6ef545a0 | refs/heads/master | 2023-08-16T00:27:56.866626 | 2021-10-23T07:35:37 | 2021-10-23T07:35:37 | 331,875,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,043 | py | from typing import List
class Solution():
    def isValidSudoku(self, board: List[List[str]]) -> bool:
        """Return True if the filled cells of a 9x9 Sudoku board are valid.

        Only placed digits are checked (empty cells are '.'); the board does
        not need to be solvable.  A board is valid when no digit repeats in
        any row, any column, or any of the nine 3x3 sub-boxes.
        """
        seen_rows = [set() for _ in range(9)]
        seen_cols = [set() for _ in range(9)]
        seen_boxes = [set() for _ in range(9)]
        for r, row in enumerate(board):
            for c, cell in enumerate(row):
                if cell == '.':
                    continue
                digit = int(cell)
                box = (r // 3) * 3 + c // 3
                # Any repeat within one of the three groups invalidates the board.
                if (digit in seen_rows[r]
                        or digit in seen_cols[c]
                        or digit in seen_boxes[box]):
                    return False
                seen_rows[r].add(digit)
                seen_cols[c].add(digit)
                seen_boxes[box].add(digit)
        return True
if __name__ == '__main__':
solution = Solution()
board = [["5", "3", ".", ".", "7", ".", ".", ".", "."]
, ["6", ".", ".", "1", "9", "5", ".", ".", "."]
, [".", "9", "8", ".", ".", ".", ".", "6", "."]
, ["8", ".", ".", ".", "6", ".", ".", ".", "3"]
, ["4", ".", ".", "8", ".", "3", ".", ".", "1"]
, ["7", ".", ".", ".", "2", ".", ".", ".", "6"]
, [".", "6", ".", ".", ".", ".", "2", "8", "."]
, [".", ".", ".", "4", "1", "9", ".", ".", "5"]
, [".", ".", ".", ".", "8", ".", ".", "7", "9"]]
is_valid = solution.isValidSudoku(board)
print("Is valid? -> {}".format(is_valid))
board = [["8", "3", ".", ".", "7", ".", ".", ".", "."]
, ["6", ".", ".", "1", "9", "5", ".", ".", "."]
, [".", "9", "8", ".", ".", ".", ".", "6", "."]
, ["8", ".", ".", ".", "6", ".", ".", ".", "3"]
, ["4", ".", ".", "8", ".", "3", ".", ".", "1"]
, ["7", ".", ".", ".", "2", ".", ".", ".", "6"]
, [".", "6", ".", ".", ".", ".", "2", "8", "."]
, [".", ".", ".", "4", "1", "9", ".", ".", "5"]
, [".", ".", ".", ".", "8", ".", ".", "7", "9"]]
is_valid = solution.isValidSudoku(board)
print("Is valid? -> {}".format(is_valid))
| [
"szhmery@gmail.com"
] | szhmery@gmail.com |
d7278abc6eb7faec64da695befeefb216e2a9c29 | ed12b604e0626c1393406d3495ef5bbaef136e8a | /Iniciante/Python/exercises from 1000 to 1099/exercise_1017.py | b2aa81cfeb034d2763bee49764fc7b6004986d30 | [] | no_license | NikolasMatias/urionlinejudge-exercises | 70200edfd2f9fc3889e024dface2579b7531ba65 | ca658ee8b2100e2b687c3a081555fa0770b86198 | refs/heads/main | 2023-09-01T20:33:53.150414 | 2023-08-21T07:07:32 | 2023-08-21T07:07:32 | 361,160,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | def calculateLitros(horasViagem, velocidadeMedia):
distancia = velocidadeMedia*horasViagem
qtdeLitros = distancia / 12.0
print("{:.3f}".format(qtdeLitros))
if __name__ == "__main__":
    # URI 1017 supplies two integers on stdin: trip time in hours and
    # average speed in km/h.  Guarding the call keeps the module importable
    # without blocking on input().
    calculateLitros(int(input()), int(input()))
"nikolas.matias500@gmail.com"
] | nikolas.matias500@gmail.com |
235f90dbc5a2bfaf134a72fb2b5c6503e62e0fcc | 4cc285b0c585241ff4404087e6fbb901195639be | /NeuralNetworkNumbers/venv/Lib/site-packages/sklearn/covariance/_shrunk_covariance.py | 38d8f9e44a41fcd0a6b9e914976237455456d8e5 | [] | no_license | strazhg/NeuralNetworksPython | 815542f4ddbb86e918e657f783158f8c078de514 | 15038e44a5a6c342336c119cdd2abdeffd84b5b1 | refs/heads/main | 2023-04-16T18:51:29.602644 | 2021-04-27T14:46:55 | 2021-04-27T14:46:55 | 361,944,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:7eb0d2a7c9800d929396246a06bff3d5f7db239c9aa22020464f240899bb46d2
size 21096
| [
"golubstrazh@gmail.com"
] | golubstrazh@gmail.com |
0a4ecd1ec7cc3e33c5458bf9b06e84e770f48c95 | cad46af6291d48f3b3d7cc4fdf4357cae7243803 | /SDscript_Butterworth_GD_edit.py | 80e98c5a4e8ae15238fc2d74482d39e3f8ff4589 | [] | no_license | gddickinson/flika_scripts | f0977ff8911ba445a38db834a69cd7171c9bacf8 | 429de8bafc64e7a1e4a9f7828f7db38661cfe6d2 | refs/heads/master | 2023-06-23T16:24:13.843686 | 2023-06-15T21:34:59 | 2023-06-15T21:34:59 | 173,676,209 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,065 | py | # Instructions:
# First, open a window.
# Trim the window in time.
# Then black level subtract: 90 for LLS, 520 for TIRF.
# Set parameters.
# Run script.
####################################
### Set post-filter type ####
####################################
#postFilterType = 'butterworth'
postFilterType = 'savgol'
################################
#### Parameters ############
################################
sigma = 2 # Change this number if you want to vary the sigma of the gaussian blur
sampling_interval = 10 # frame duration in ms
q = 0.06 # for LLS: 10ms frame duration, q=0.02; 20ms frame duration, q=0.05
# for TIRF: 10ms frame duration, q=0.1902
#Butterworth filter options
low_cutoff = 1 # Hz
high_cutoff = 20 # Hz
filter_order = 3 # Increasing filter order increases the steepness of the filter rolloff
#Sav-Gol filter options
window_length = 21 # The length of the filter window (i.e. the number of coefficients). Must be a positive odd integer.
polyorder = 5 # The order of the polynomial used to fit the samples. polyorder must be less than window_length.
#Convolution filter options
boxcar_width = 150 # boxcar width in terms of ms
#######################################
## Run after specifying parameters ###
#######################################
from scipy.ndimage.filters import convolve
# --- Derived acquisition parameters -------------------------------------
sampling_rate = 1/(sampling_interval/1000) # in Hz
# Sanity check: the high cutoff must not exceed the Nyquist frequency.
# NOTE(review): the except clause only prints a warning -- execution
# continues with the out-of-range cutoff rather than aborting.
try:
    assert high_cutoff <= .5 * sampling_rate
except AssertionError:
    print('High Frequency Cutoff is above the Nyquist frequency. Lower your high frequency cutoff')
# Normalize cutoffs to the Nyquist frequency (0..1) for butterworth_filter.
high_cutoff_scaled = high_cutoff / (sampling_rate/2)
low_cutoff_scaled = low_cutoff / (sampling_rate/2)
# Boxcar width converted from milliseconds to a whole number of frames.
boxcar_frames = int(np.round(boxcar_width / sampling_interval))
#For testing
#A = np.sqrt(10) * np.random.randn(10000, 10,10) + 10
#Window(A, 'original image')
# --- Spatial pre-filter on the current flika window ---------------------
nFrames = g.win.mt
prefilter = gaussian_blur(sigma, keepSourceWindow=True)
A = prefilter.image
# --- Temporal post-filter: Butterworth or Savitzky-Golay ----------------
# NOTE(review): B is only assigned inside one of these two branches; any
# other postFilterType value raises NameError at the mean_B line below.
if postFilterType == 'butterworth':
    postfilter = butterworth_filter(filter_order, low_cutoff_scaled, high_cutoff_scaled, keepSourceWindow=True)
    B = postfilter.image
    prefilter.close()
    #postfilter.close()
    Window(A, 'original image -> gaussian blur')
if postFilterType == 'savgol':
    # savgol_filter requires an odd window longer than the polynomial order.
    if window_length % 2 != 1 or window_length < 1:
        raise TypeError("window_length size must be a positive odd number")
    if window_length < polyorder + 2:
        raise TypeError("window_length is too small for the polynomials order")
    B = scipy.signal.savgol_filter(A, window_length, polyorder, axis=0)
    Window(B, 'original image -> gaussian blur -> savgol filtered')
# --- Running statistics over a boxcar_frames-long temporal window -------
# Each convolve call is a temporal boxcar (running) mean along axis 0.
mean_A = convolve(A, weights=np.full((boxcar_frames,1,1),1.0/boxcar_frames))
mean_B = convolve(B, weights=np.full((boxcar_frames,1,1),1.0/boxcar_frames))
B2 = B**2 # B squared
mean_B2 = convolve(B2, weights=np.full((boxcar_frames,1,1),1.0/boxcar_frames))
# Running variance via E[B^2] - E[B]^2, then standard deviation.
variance_B = mean_B2 - mean_B**2
stdev_B = np.sqrt(variance_B)
# Clamp negatives so np.sqrt(q*mean_A) below stays real-valued.
mean_A[mean_A<0] = 0 #removes negative values
# Final output window: local SD minus the shot-noise estimate sqrt(q*mean).
Window(stdev_B - np.sqrt(q*mean_A), 'stdev minus sqrt mean')
"george.dickinson@gmail.com"
] | george.dickinson@gmail.com |
c8f3b5602a77ff20e816318a011523b2773f2071 | 02a68279e0d04340de4f87f7737b351cd6da1420 | /run_auxvae.py | 9fbf2250de6204d7cac36c03e1d5a7dc69e55abb | [
"MIT"
] | permissive | lim0606/AdversarialVariationalBayes | ffdbb875bae666f06913503b1bcd417c1b4e948f | 93487ca64007c8381e1ed5fc3d131b5da751ba47 | refs/heads/master | 2020-07-25T16:06:22.420746 | 2019-09-13T21:32:57 | 2019-09-13T21:32:57 | 208,349,706 | 0 | 0 | MIT | 2019-09-13T21:31:01 | 2019-09-13T21:31:01 | null | UTF-8 | Python | false | false | 4,363 | py | import os
import scipy.misc
import numpy as np
import argparse
from avb.utils import pp
from avb import inputs
from avb.auxvae.train import train
from avb.auxvae.test import test
from avb.decoders import get_decoder
from avb.auxvae.models import get_encoder, get_encoder_aux, get_decoder_aux
import tensorflow as tf
parser = argparse.ArgumentParser(description='Train and run a avae.')
parser.add_argument("--nsteps", default=200000, type=int, help="Iterations to train.")
parser.add_argument("--learning-rate", default=1e-4, type=float, help="Learning rate of for adam.")
parser.add_argument("--ntest", default=100, type=int, help="How often to run test code.")
parser.add_argument("--batch-size", default=64, type=int, help="The size of batch images.")
parser.add_argument("--image-size", default=108, type=int, help="The size of image to use (will be center cropped).")
parser.add_argument("--output-size", default=64, type=int, help="The size of the output images to produce.")
parser.add_argument("--encoder", default="conv0", type=str, help="Architecture to use.")
parser.add_argument("--decoder", default="conv0", type=str, help="Architecture to use.")
parser.add_argument("--adversary", default="conv0", type=str, help="Architecture to use.")
parser.add_argument("--c-dim", default=3, type=int, help="Dimension of image color. ")
parser.add_argument("--z-dim", default=100, type=int, help="Dimension of latent space.")
parser.add_argument("--a-dim", default=100, type=int, help="Dimension for auxiliary variables.")
parser.add_argument("--z-dist", default="gauss", type=str, help="Prior distribution of latent space.")
parser.add_argument("--cond-dist", default="gauss", type=str, help="Conditional distribution.")
parser.add_argument("--anneal-steps", default="0", type=int, help="How many steps to use for annealing.")
parser.add_argument("--is-anneal", default=False, action='store_true', help="True for training, False for testing.")
parser.add_argument("--dataset", default="celebA", type=str, help="The name of dataset.")
parser.add_argument("--data-dir", default="data", type=str, help="Path to the data directory.")
parser.add_argument('--split-dir', default='data/splits', type=str, help='Folder where splits are found')
parser.add_argument("--log-dir", default="tf_logs", type=str, help="Directory name to save the checkpoints.")
parser.add_argument("--sample-dir", default="samples", type=str, help="Directory name to save the image samples.")
parser.add_argument("--eval-dir", default="eval", type=str, help="Directory where to save logs.")
parser.add_argument("--is-train", default=False, action='store_true', help="True for training, False for testing.")
parser.add_argument("--is-01-range", default=False, action='store_true', help="If image is constrained to values between 0 and 1.")
parser.add_argument("--test-nite", default=0, type=int, help="Number of iterations of ite.")
parser.add_argument("--test-nais", default=10, type=int, help="Number of iterations of ais.")
parser.add_argument("--test-ais-nchains", default=16, type=int, help="Number of chains for ais.")
parser.add_argument("--test-ais-nsteps", default=100, type=int, help="Number of annealing steps for ais.")
parser.add_argument("--test-ais-eps", default=1e-2, type=float, help="Stepsize for AIS.")
parser.add_argument("--test-is-center-posterior", default=False, action='store_true', help="Wether to center posterior plots.")
def main():
    """Parse CLI arguments, build the auxvae networks, then train or test."""
    args = parser.parse_args()
    # vars() aliases the Namespace __dict__, so later writes to config are
    # also visible through args.
    config = vars(args)
    # Fixed filter widths not exposed on the CLI (presumably generator /
    # discriminator feature-map sizes -- confirm against the model code).
    config['gf_dim'] = 64
    config['df_dim'] = 64
    config['test_is_adaptive_eps'] = False
    pp.pprint(config)
    # Ensure output directories exist before any checkpoint/sample writes.
    if not os.path.exists(args.log_dir):
        os.makedirs(args.log_dir)
    if not os.path.exists(args.sample_dir):
        os.makedirs(args.sample_dir)
    decoder = get_decoder(args.decoder, config)
    encoder = get_encoder(args.encoder, config)
    # NOTE(review): both auxiliary networks are keyed on args.encoder; the
    # decoder_aux call using args.encoder (not args.decoder) looks deliberate
    # (aux nets share the encoder architecture) but should be confirmed.
    decoder_aux = get_decoder_aux(args.encoder, config)
    encoder_aux = get_encoder_aux(args.encoder, config)
    if args.is_train:
        # Training mode: stream train/val batches and optimize.
        x_train = inputs.get_inputs('train', config)
        x_val = inputs.get_inputs('val', config)
        train(encoder, decoder, encoder_aux, decoder_aux, x_train, x_val, config)
    else:
        # Evaluation mode: run the test pipeline (see the test-* CLI flags).
        x_test = inputs.get_inputs('test', config)
        test(encoder, decoder, encoder_aux, decoder_aux, x_test, config)
test(encoder, decoder, encoder_aux, decoder_aux, x_test, config)
if __name__ == '__main__':
main()
| [
"lars.mescheder@tuebingen.mpg.de"
] | lars.mescheder@tuebingen.mpg.de |
a12e92b19c21b0082dfaee4fd0e55de9baa0a579 | 963b4cf9fe1de845d994d0c8d3c9bb3def326b5b | /SomeProgs/Python Stuff/Coding Assignments/MeanMedianMode.py | 227d8e60f3814bd424ed898824903f4285954e58 | [] | no_license | lws803/cs1010_A0167 | aa727bdf029168238674d84ea6ce9c75905b8971 | 5759332364909ee1d2eb9c26b0d95d4dc153656f | refs/heads/master | 2022-03-13T02:52:26.488846 | 2019-11-14T20:53:15 | 2019-11-14T20:53:15 | 105,607,027 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,352 | py | # Name: Tan Tze Guang
# Class: 13S28
# Date: 26 March 2013
# This program finds the mean, median and mode of a list of numbers
def mean(List):
    """Return the arithmetic mean of the numbers in List.

    Bug fix: the original summed the loop *indices* (``for items in
    range(len(List))``) instead of the list's values, so the result was
    (n-1)*n/2 / n regardless of the data.
    """
    return sum(List) / len(List)
def median(List):
    """Return the median of the numbers in List.

    Bug fixes relative to the original:
    - even-length lists averaged the wrong pair (indices n//2 and n//2 + 1,
      which is off by one and raises IndexError for a 2-element list);
      the correct middle pair is indices n//2 - 1 and n//2.
    - the caller's list is no longer mutated (sorted copy instead of
      in-place ``List.sort()``).
    """
    ordered = sorted(List)
    n = len(ordered)
    mid = n // 2
    if n % 2 == 0:
        # Even count: average the two middle values.
        return (ordered[mid - 1] + ordered[mid]) / 2
    # Odd count: the single middle value.
    return ordered[mid]
def mode(List):
    """Return the single mode (most frequent value) of List.

    When several values tie for the highest frequency, the largest tied
    value is returned -- this matches the original algorithm, which popped
    the smaller element whenever two frequencies were equal ("the smaller
    mode will be removed").

    Improvements over the original: the caller's list is no longer
    destroyed (the old pop-loop consumed it), the per-iteration debug
    prints are gone, and counting is O(n) via Counter instead of O(n^2)
    repeated ``List.count`` calls.
    """
    from collections import Counter

    counts = Counter(List)
    highest = max(counts.values())
    # Among all values reaching the highest frequency, keep the largest.
    return max(value for value, freq in counts.items() if freq == highest)
def main():
    """Demo driver: print the mean, median, and mode of a fixed sample list."""
    intro = (
        "This program finds the mean,median and mode of a list of numbers.",
        "Currently, the program is only able to find 1 mode.",
        "In the case of multiple modes, the smaller mode will be removed.",
        "",
    )
    for line in intro:
        print(line)
    numbers = [8, 6, 7, 9, 9, 6, 4, 4, 6, 8, 9, 9, 9, 8, 7, 7, 6]
    print(f"The list has {len(numbers)} numbers")
    print()
    print(f"The mean of this list of numbers is {mean(numbers)}")
    print()
    print(f"The median of this list of numbers is {median(numbers)}")
    print()
    print(f"The mode of this list of numbers is {mode(numbers)}")


main()
| [
"omnikron96@gmail.com"
] | omnikron96@gmail.com |
8cbd3d3a3c5c8ba27f17dd965a9dea8c48d53f51 | 186c04fff4c0ca95c12c3f8a117c7c95ce70b2e4 | /spacy/lang/nb/lemmatizer/_adverbs_wordforms.py | 1e97bcf4284f088698063e996a88c32ae3ae9a0b | [
"MIT"
] | permissive | IndicoDataSolutions/spaCy | 58c9d4bcc854ab278516c050dee8b2b92f528a8c | 4e28b5acd702691e5ce64cf7a1f9da4af5a271b5 | refs/heads/master | 2023-01-01T05:35:35.393944 | 2022-03-03T19:21:34 | 2022-03-03T19:21:34 | 60,737,647 | 5 | 2 | MIT | 2022-12-22T01:57:03 | 2016-06-08T23:30:42 | Python | UTF-8 | Python | false | false | 21,017 | py | # coding: utf8
"""
All wordforms are extracted from Norsk Ordbank in Norwegian Bokmål 2005
(CLARINO NB - Språkbanken), Nasjonalbiblioteket, Norway:
https://www.nb.no/sprakbanken/show?serial=oai%3Anb.no%3Asbr-5&lang=en
License:
Creative_Commons-BY (CC-BY) (https://creativecommons.org/licenses/by/4.0/)
"""
from __future__ import unicode_literals
ADVERBS_WORDFORMS = {
'à jour': ('à jour',),
'à la carte': ('à la carte',),
'à la grecque': ('à la grecque',),
'à la mode': ('à la mode',),
'òg': ('òg',),
'a': ('a',),
'a cappella': ('a cappella',),
'a conto': ('a conto',),
'a konto': ('a konto',),
'a posteriori': ('a posteriori',),
'a prima vista': ('a prima vista',),
'a priori': ('a priori',),
'a tempo': ('a tempo',),
'a verbo': ('a verbo',),
'a viso': ('a viso',),
'a vista': ('a vista',),
'ad absurdum': ('ad absurdum',),
'ad acta': ('ad acta',),
'ad hoc': ('ad hoc',),
'ad infinitum': ('ad infinitum',),
'ad notam': ('ad notam',),
'ad undas': ('ad undas',),
'adagio': ('adagio',),
'akkurat': ('akkurat',),
'al fresco': ('al fresco',),
'al secco': ('al secco',),
'aldeles': ('aldeles',),
'alders tid': ('alders tid',),
'aldri': ('aldri',),
'aleine': ('aleine',),
'alene': ('alene',),
'alias': ('alias',),
'allegretto': ('allegretto',),
'allegro': ('allegro',),
'aller': ('aller',),
'allerede': ('allerede',),
'allikevel': ('allikevel',),
'alltid': ('alltid',),
'alltids': ('alltids',),
'alt': ('alt',),
'altfor': ('altfor',),
'altså': ('altså',),
'amok': ('amok',),
'an': ('an',),
'ana': ('ana',),
'andante': ('andante',),
'andantino': ('andantino',),
'andelsvis': ('andelsvis',),
'andfares': ('andfares',),
'andføttes': ('andføttes',),
'annetsteds': ('annetsteds',),
'annetstedsfra': ('annetstedsfra',),
'annetstedshen': ('annetstedshen',),
'anno': ('anno',),
'anslagsvis': ('anslagsvis',),
'anstendigvis': ('anstendigvis',),
'anstigende': ('anstigende',),
'antakeligvis': ('antakeligvis',),
'antydningsvis': ('antydningsvis',),
'apropos': ('apropos',),
'argende': ('argende',),
'at': ('at',),
'atter': ('atter',),
'attpåtil': ('attpåtil',),
'attåt': ('attåt',),
'au': ('au',),
'avdelingsvis': ('avdelingsvis',),
'avdragsvis': ('avdragsvis',),
'avhendes': ('avhendes',),
'avhends': ('avhends',),
'avsatsvis': ('avsatsvis',),
'bakk': ('bakk',),
'baklengs': ('baklengs',),
'bare': ('bare',),
'bataljonsvis': ('bataljonsvis',),
'bekende': ('bekende',),
'belgende': ('belgende',),
'betids': ('betids',),
'bi': ('bi',),
'bidevind': ('bidevind',),
'bis': ('bis',),
'bitevis': ('bitevis',),
'bitte': ('bitte',),
'bitterlig': ('bitterlig',),
'blanko': ('blanko',),
'blidelig': ('blidelig',),
'blikk': ('blikk',),
'blikkende': ('blikkende',),
'blottende': ('blottende',),
'bom': ('bom',),
'bommende': ('bommende',),
'bona fide': ('bona fide',),
'brennfort': ('brennfort',),
'brutto': ('brutto',),
'bråtevis': ('bråtevis',),
'bums': ('bums',),
'buntevis': ('buntevis',),
'buntvis': ('buntvis',),
'bus': ('bus',),
'cantabile': ('cantabile',),
'cf': ('cf',),
'cif': ('cif',),
'cirka': ('cirka',),
'crescendo': ('crescendo',),
'da': ('da',),
'dagevis': ('dagevis',),
'dagstøtt': ('dagstøtt',),
'dakapo': ('dakapo',),
'dam': ('dam',),
'dammende': ('dammende',),
'dann': ('dann',),
'de facto': ('de facto',),
'de jure': ('de jure',),
'decrescendo': ('decrescendo',),
'delkredere': ('delkredere',),
'dels': ('dels',),
'delvis': ('delvis',),
'derav': ('derav',),
'deretter': ('deretter',),
'derfor': ('derfor',),
'derimot': ('derimot',),
'dermed': ('dermed',),
'dernest': ('dernest',),
'dess': ('dess',),
'dessuten': ('dessuten',),
'dessverre': ('dessverre',),
'desto': ('desto',),
'diminuendo': ('diminuendo',),
'dis': ('dis',),
'dog': ('dog',),
'dolce': ('dolce',),
'dorgende': ('dorgende',),
'dryppende': ('dryppende',),
'drøssevis': ('drøssevis',),
'dus': ('dus',),
'dusinvis': ('dusinvis',),
'dyende': ('dyende',),
'døgnvis': ('døgnvis',),
'dønn': ('dønn',),
'dørg': ('dørg',),
'dørgende': ('dørgende',),
'dørimellom': ('dørimellom',),
'ei': ('ei',),
'eiende': ('eiende',),
'einkom': ('einkom',),
'eitrende': ('eitrende',),
'eks': ('eks',),
'eksempelvis': ('eksempelvis',),
'ekspress': ('ekspress',),
'ekstempore': ('ekstempore',),
'eldende': ('eldende',),
'eldende': ('eldende',),
'ellers': ('ellers',),
'en': ('en',),
'en bloc': ('en bloc',),
'en detail': ('en detail',),
'en face': ('en face',),
'en gros': ('en gros',),
'en masse': ('en masse',),
'en passant': ('en passant',),
'en profil': ('en profil',),
'en suite': ('en suite',),
'enda': ('enda',),
'endatil': ('endatil',),
'ende': ('ende',),
'ender': ('ender',),
'endog': ('endog',),
'ene': ('ene',),
'engang': ('engang',),
'enkeltvis': ('enkeltvis',),
'enkom': ('enkom',),
'enn': ('enn',),
'ennå': ('ennå',),
'eo ipso': ('eo ipso',),
'ergo': ('ergo',),
'et cetera': ('et cetera',),
'etappevis': ('etappevis',),
'etterhånden': ('etterhånden',),
'etterpå': ('etterpå',),
'etterskottsvis': ('etterskottsvis',),
'etterskuddsvis': ('etterskuddsvis',),
'ex animo': ('ex animo',),
'ex auditorio': ('ex auditorio',),
'ex cathedra': ('ex cathedra',),
'ex officio': ('ex officio',),
'fas': ('fas',),
'fatt': ('fatt',),
'fatt': ('fatt',),
'feil': ('feil',),
'femti-femti': ('femti-femti',),
'fifty-fifty': ('fifty-fifty',),
'flekkevis': ('flekkevis',),
'flokkevis': ('flokkevis',),
'fluks': ('fluks',),
'fluksens': ('fluksens',),
'flunkende': ('flunkende',),
'flust': ('flust',),
'fly': ('fly',),
'fob': ('fob',),
'for': ('for',),
'for lengst': ('for lengst',),
'for resten': ('for resten',),
'for så vidt': ('for så vidt',),
'for visst': ('for visst',),
'for øvrig': ('for øvrig',),
'fordevind': ('fordevind',),
'fordum': ('fordum',),
'fore': ('fore',),
'forhakkende': ('forhakkende',),
'forholdsvis': ('forholdsvis',),
'forhåpentlig': ('forhåpentlig',),
'forhåpentligvis': ('forhåpentligvis',),
'forlengs': ('forlengs',),
'formelig': ('formelig',),
'forresten': ('forresten',),
'forsøksvis': ('forsøksvis',),
'forte': ('forte',),
'fortfarende': ('fortfarende',),
'fortissimo': ('fortissimo',),
'fortrinnsvis': ('fortrinnsvis',),
'framleis': ('framleis',),
'framlengs': ('framlengs',),
'framstupes': ('framstupes',),
'framstups': ('framstups',),
'franko': ('franko',),
'free on board': ('free on board',),
'free on rail': ('free on rail',),
'fremdeles': ('fremdeles',),
'fremlengs': ('fremlengs',),
'fremstupes': ('fremstupes',),
'fremstups': ('fremstups',),
'furioso': ('furioso',),
'fylkesvis': ('fylkesvis',),
'følgelig': ('følgelig',),
'først': ('først',),
'ganske': ('ganske',),
'gid': ('gid',),
'givetvis': ('givetvis',),
'gjerne': ('gjerne',),
'gladelig': ('gladelig',),
'glimtvis': ('glimtvis',),
'glissando': ('glissando',),
'glugg': ('glugg',),
'gorr': ('gorr',),
'gorrende': ('gorrende',),
'gradvis': ('gradvis',),
'grandioso': ('grandioso',),
'granngivelig': ('granngivelig',),
'grassat': ('grassat',),
'grave': ('grave',),
'gruppevis': ('gruppevis',),
'gudskjelov': ('gudskjelov',),
'gullende': ('gullende',),
'gørr': ('gørr',),
'gørrende': ('gørrende',),
'hakk': ('hakk',),
'hakkende': ('hakkende',),
'halvveis': ('halvveis',),
'haugevis': ('haugevis',),
'heden': ('heden',),
'heiman': ('heiman',),
'heldigvis': ('heldigvis',),
'heller': ('heller',),
'helst': ('helst',),
'henholdsvis': ('henholdsvis',),
'herre': ('herre',),
'hersens': ('hersens',),
'himlende': ('himlende',),
'hodekulls': ('hodekulls',),
'hodestupes': ('hodestupes',),
'hodestups': ('hodestups',),
'hoggende': ('hoggende',),
'honoris causa': ('honoris causa',),
'hoppende': ('hoppende',),
'hulter': ('hulter',),
'hundretusenvis': ('hundretusenvis',),
'hundrevis': ('hundrevis',),
'hurra-meg-rundt': ('hurra-meg-rundt',),
'hvi': ('hvi',),
'hvor': ('hvor',),
'hvorav': ('hvorav',),
'hvordan': ('hvordan',),
'hvorfor': ('hvorfor',),
'hånt': ('hånt',),
'høylig': ('høylig',),
'høyst': ('høyst',),
'i alle fall': ('i alle fall',),
'i stedet': ('i stedet',),
'iallfall': ('iallfall',),
'ibidem': ('ibidem',),
'id est': ('id est',),
'igjen': ('igjen',),
'ikke': ('ikke',),
'ildende': ('ildende',),
'ildende': ('ildende',),
'imens': ('imens',),
'imidlertid': ('imidlertid',),
'in absentia': ('in absentia',),
'in absurdum': ('in absurdum',),
'in blanko': ('in blanko',),
'in casu': ('in casu',),
'in contumaciam': ('in contumaciam',),
'in corpore': ('in corpore',),
'in duplo': ('in duplo',),
'in extenso': ('in extenso',),
'in flagranti': ('in flagranti',),
'in honorem': ('in honorem',),
'in medias res': ('in medias res',),
'in memoriam': ('in memoriam',),
'in mente': ('in mente',),
'in natura': ('in natura',),
'in nuce': ('in nuce',),
'in persona': ('in persona',),
'in quarto': ('in quarto',),
'in saldo': ('in saldo',),
'in salvo': ('in salvo',),
'in situ': ('in situ',),
'in solidum': ('in solidum',),
'in spe': ('in spe',),
'in triplo': ('in triplo',),
'in vitro': ('in vitro',),
'in vivo': ('in vivo',),
'ingenlunde': ('ingenlunde',),
'ingensteds': ('ingensteds',),
'inkognito': ('inkognito',),
'innenat': ('innenat',),
'innledningsvis': ('innledningsvis',),
'innleiingsvis': ('innleiingsvis',),
'isteden': ('isteden',),
'især': ('især',),
'item': ('item',),
'ja menn': ('ja menn',),
'ja så menn': ('ja så menn',),
'jammen': ('jammen',),
'jamnlig': ('jamnlig',),
'jamsides': ('jamsides',),
'jamt over': ('jamt over',),
'jamvel': ('jamvel',),
'jaså': ('jaså',),
'jevnlig': ('jevnlig',),
'jevnsides': ('jevnsides',),
'jevnt over': ('jevnt over',),
'jo menn': ('jo menn',),
'jommen': ('jommen',),
'just': ('just',),
'kanon': ('kanon',),
'kanskje': ('kanskje',),
'kav': ('kav',),
'kavende': ('kavende',),
'kilovis': ('kilovis',),
'klin': ('klin',),
'klink': ('klink',),
'klinkende': ('klinkende',),
'klokelig': ('klokelig',),
'knakende': ('knakende',),
'knapt': ('knapt',),
'knasende': ('knasende',),
'knekkende': ('knekkende',),
'knøtrende': ('knøtrende',),
'knøttende': ('knøttende',),
'kolende': ('kolende',),
'kul': ('kul',),
'kuli': ('kuli',),
'kun': ('kun',),
'kvartalsvis': ('kvartalsvis',),
'kvekk': ('kvekk',),
'kølende': ('kølende',),
'lagerfritt': ('lagerfritt',),
'lagom': ('lagom',),
'lagvis': ('lagvis',),
'larghetto': ('larghetto',),
'largo': ('largo',),
'lassevis': ('lassevis',),
'legato': ('legato',),
'leilighetsvis': ('leilighetsvis',),
'lell': ('lell',),
'lenger': ('lenger',),
'liddelig': ('liddelig',),
'like': ('like',),
'likeledes': ('likeledes',),
'likeså': ('likeså',),
'likevel': ('likevel',),
'likså': ('likså',),
'lissom': ('lissom',),
'litervis': ('litervis',),
'livende': ('livende',),
'lovformelig': ('lovformelig',),
'lovlig': ('lovlig',),
'lukt': ('lukt',),
'lut': ('lut',),
'luta': ('luta',),
'lutende': ('lutende',),
'lykkeligvis': ('lykkeligvis',),
'lynfort': ('lynfort',),
'lys': ('lys',),
'maestoso': ('maestoso',),
'mala fide': ('mala fide',),
'malapropos': ('malapropos',),
'massevis': ('massevis',),
'med rette': ('med rette',),
'medio': ('medio',),
'medium': ('medium',),
'meget': ('meget',),
'mengdevis': ('mengdevis',),
'metervis': ('metervis',),
'mezzoforte': ('mezzoforte',),
'midsommers': ('midsommers',),
'midsommers': ('midsommers',),
'midt': ('midt',),
'midtsommers': ('midtsommers',),
'midtsommers': ('midtsommers',),
'midtvinters': ('midtvinters',),
'midvinters': ('midvinters',),
'milevis': ('milevis',),
'millionvis': ('millionvis',),
'min sann': ('min sann',),
'min sant': ('min sant',),
'min santen': ('min santen',),
'minus': ('minus',),
'mo': ('mo',),
'molto': ('molto',),
'motsols': ('motsols',),
'motstrøms': ('motstrøms',),
'mukk': ('mukk',),
'mukkende': ('mukkende',),
'muligens': ('muligens',),
'muligvis': ('muligvis',),
'murende': ('murende',),
'musende': ('musende',),
'mutters': ('mutters',),
'månedsvis': ('månedsvis',),
'naggende': ('naggende',),
'naturligvis': ('naturligvis',),
'nauende': ('nauende',),
'navnlig': ('navnlig',),
'neigu': ('neigu',),
'neimen': ('neimen',),
'nemlig': ('nemlig',),
'neppe': ('neppe',),
'nesegrus': ('nesegrus',),
'nest': ('nest',),
'nesten': ('nesten',),
'netto': ('netto',),
'nettopp': ('nettopp',),
'noenlunde': ('noenlunde',),
'noensinne': ('noensinne',),
'noensteds': ('noensteds',),
'nok': ('nok',),
'nok': ('nok',),
'noksom': ('noksom',),
'nokså': ('nokså',),
'non stop': ('non stop',),
'nonstop': ('nonstop',),
'notabene': ('notabene',),
'nu': ('nu',),
'nylig': ('nylig',),
'nyss': ('nyss',),
'nå': ('nå',),
'når': ('når',),
'nåvel': ('nåvel',),
'nære': ('nære',),
'nærere': ('nærere',),
'nærest': ('nærest',),
'nærmere': ('nærmere',),
'nærmest': ('nærmest',),
'nødvendigvis': ('nødvendigvis',),
'offside': ('offside',),
'også': ('også',),
'om att': ('om att',),
'om igjen': ('om igjen',),
'omme': ('omme',),
'omsider': ('omsider',),
'omsonst': ('omsonst',),
'omtrent': ('omtrent',),
'onnimellom': ('onnimellom',),
'opp att': ('opp att',),
'opp ned': ('opp ned',),
'oppad': ('oppad',),
'oppstrøms': ('oppstrøms',),
'oven': ('oven',),
'overalt': ('overalt',),
'overens': ('overens',),
'overhodet': ('overhodet',),
'overlag': ('overlag',),
'overmorgen': ('overmorgen',),
'overmåte': ('overmåte',),
'overvettes': ('overvettes',),
'pakkende': ('pakkende',),
'pal': ('pal',),
'par avion': ('par avion',),
'par excellence': ('par excellence',),
'parlando': ('parlando',),
'pars pro toto': ('pars pro toto',),
'partout': ('partout',),
'parvis': ('parvis',),
'per capita': ('per capita',),
'peu à peu': ('peu à peu',),
'peu om peu': ('peu om peu',),
'pianissimo': ('pianissimo',),
'piano': ('piano',),
'pinende': ('pinende',),
'pinnende': ('pinnende',),
'pist': ('pist',),
'pizzicato': ('pizzicato',),
'pladask': ('pladask',),
'plent': ('plent',),
'plenty': ('plenty',),
'pluss': ('pluss',),
'porsjonsvis': ('porsjonsvis',),
'portamento': ('portamento',),
'portato': ('portato',),
'post festum': ('post festum',),
'post meridiem': ('post meridiem',),
'post mortem': ('post mortem',),
'prestissimo': ('prestissimo',),
'presto': ('presto',),
'prima vista': ('prima vista',),
'primo': ('primo',),
'pro anno': ('pro anno',),
'pro persona': ('pro persona',),
'pro tempore': ('pro tempore',),
'proforma': ('proforma',),
'prompt': ('prompt',),
'prompte': ('prompte',),
'proppende': ('proppende',),
'prosentvis': ('prosentvis',),
'pukka': ('pukka',),
'puljevis': ('puljevis',),
'punktvis': ('punktvis',),
'pyton': ('pyton',),
'pø om pø': ('pø om pø',),
'quantum satis': ('quantum satis',),
'rammende': ('rammende',),
'rangsøles': ('rangsøles',),
'rasende': ('rasende',),
'ratevis': ('ratevis',),
'ratt': ('ratt',),
'rav': ('rav',),
'ravende': ('ravende',),
'reint': ('reint',),
'rent': ('rent',),
'respektive': ('respektive',),
'rettsøles': ('rettsøles',),
'reverenter': ('reverenter',),
'riktig nok': ('riktig nok',),
'riktignok': ('riktignok',),
'rimeligvis': ('rimeligvis',),
'ringside': ('ringside',),
'rispende': ('rispende',),
'ritardando': ('ritardando',),
'riv': ('riv',),
'rubato': ('rubato',),
'ruskende': ('ruskende',),
'rykkevis': ('rykkevis',),
'saktelig': ('saktelig',),
'saktens': ('saktens',),
'sammen': ('sammen',),
'samstundes': ('samstundes',),
'samt': ('samt',),
'sann': ('sann',),
'sannelig': ('sannelig',),
'sannsynligvis': ('sannsynligvis',),
'sans phrase': ('sans phrase',),
'scilicet': ('scilicet',),
'seinhøstes': ('seinhøstes',),
'senhøstes': ('senhøstes',),
'sia': ('sia',),
'sic': ('sic',),
'sidelengs': ('sidelengs',),
'siden': ('siden',),
'sideveges': ('sideveges',),
'sidevegs': ('sidevegs',),
'sideveis': ('sideveis',),
'sikkerlig': ('sikkerlig',),
'silde': ('silde',),
'simpelthen': ('simpelthen',),
'sine anno': ('sine anno',),
'sjelden': ('sjelden',),
'sjøleies': ('sjøleies',),
'sjøleis': ('sjøleis',),
'sjøverts': ('sjøverts',),
'skeis': ('skeis',),
'skiftevis': ('skiftevis',),
'skita': ('skita',),
'skjøns': ('skjøns',),
'skogleies': ('skogleies',),
'skokkevis': ('skokkevis',),
'skrevs': ('skrevs',),
'skrittvis': ('skrittvis',),
'skrås': ('skrås',),
'skyllende': ('skyllende',),
'skåldende': ('skåldende',),
'slettes': ('slettes',),
'sluttelig': ('sluttelig',),
'smekk': ('smekk',),
'smellende': ('smellende',),
'småningom': ('småningom',),
'sneisevis': ('sneisevis',),
'snesevis': ('snesevis',),
'snuft': ('snuft',),
'snupt': ('snupt',),
'snyt': ('snyt',),
'snyta': ('snyta',),
'snyte': ('snyte',),
'solo': ('solo',),
'sommerstid': ('sommerstid',),
'spenna': ('spenna',),
'spent': ('spent',),
'spika': ('spika',),
'spikende': ('spikende',),
'spildrende': ('spildrende',),
'spill': ('spill',),
'splinter': ('splinter',),
'splitter': ('splitter',),
'sporenstreks': ('sporenstreks',),
'sprangvis': ('sprangvis',),
'sprell': ('sprell',),
'sprut': ('sprut',),
'sprutende': ('sprutende',),
'sprøyte': ('sprøyte',),
'stakkato': ('stakkato',),
'stapp': ('stapp',),
'stappa': ('stappa',),
'stappende': ('stappende',),
'staurende': ('staurende',),
'stedvis': ('stedvis',),
'steika': ('steika',),
'stein': ('stein',),
'steinsens': ('steinsens',),
'stokk': ('stokk',),
'stokkende': ('stokkende',),
'straks': ('straks',),
'stringendo': ('stringendo',),
'stummende': ('stummende',),
'stundimellom': ('stundimellom',),
'stundom': ('stundom',),
'stundomtil': ('stundomtil',),
'stupende': ('stupende',),
'styggelig': ('styggelig',),
'styggende': ('styggende',),
'stykkevis': ('stykkevis',),
'støtt': ('støtt',),
'støtvis': ('støtvis',),
'støytvis': ('støytvis',),
'sub rosa': ('sub rosa',),
'summa summarum': ('summa summarum',),
'surr': ('surr',),
'svinaktig': ('svinaktig',),
'sydøst': ('sydøst',),
'synderlig': ('synderlig',),
'så': ('så',),
'så pass': ('så pass',),
'sågar': ('sågar',),
'således': ('således',),
'såleis': ('såleis',),
'såpass': ('såpass',),
'såre': ('såre',),
'særdeles': ('særdeles',),
'særs': ('særs',),
'søkk': ('søkk',),
'søkkende': ('søkkende',),
'sønder': ('sønder',),
'takimellom': ('takimellom',),
'takomtil': ('takomtil',),
'temmelig': ('temmelig',),
'ti': ('ti',),
'tidligdags': ('tidligdags',),
'tidsnok': ('tidsnok',),
'tidvis': ('tidvis',),
'tilfeldigvis': ('tilfeldigvis',),
'tilmed': ('tilmed',),
'tilnærmelsesvis': ('tilnærmelsesvis',),
'timevis': ('timevis',),
'tjokkende': ('tjokkende',),
'tomreipes': ('tomreipes',),
'tott': ('tott',),
'trill': ('trill',),
'trillende': ('trillende',),
'trinnvis': ('trinnvis',),
'troppevis': ('troppevis',),
'troppo': ('troppo',),
'troppsvis': ('troppsvis',),
'trutt': ('trutt',),
'turevis': ('turevis',),
'turvis': ('turvis',),
'tusenfold': ('tusenfold',),
'tusenvis': ('tusenvis',),
'tvers': ('tvers',),
'tvert': ('tvert',),
'tydeligvis': ('tydeligvis',),
'tynnevis': ('tynnevis',),
'tynnevis': ('tynnevis',),
'tålig': ('tålig',),
'tønnevis': ('tønnevis',),
'tønnevis': ('tønnevis',),
'ufravendt': ('ufravendt',),
'ugjerne': ('ugjerne',),
'uheldigvis': ('uheldigvis',),
'ukevis': ('ukevis',),
'ukevis': ('ukevis',),
'ulykkeligvis': ('ulykkeligvis',),
'uløyves': ('uløyves',),
'underhånden': ('underhånden',),
'undertiden': ('undertiden',),
'unntakelsesvis': ('unntakelsesvis',),
'unntaksvis': ('unntaksvis',),
'ustyggelig': ('ustyggelig',),
'utaboks': ('utaboks',),
'utbygdes': ('utbygdes',),
'utdragsvis': ('utdragsvis',),
'utelukkende': ('utelukkende',),
'utenat': ('utenat',),
'utenboks': ('utenboks',),
'uvegerlig': ('uvegerlig',),
'uviselig': ('uviselig',),
'uvislig': ('uvislig',),
'va banque': ('va banque',),
'vanligvis': ('vanligvis',),
'vann': ('vann',),
'vekevis': ('vekevis',),
'vekevis': ('vekevis',),
'vekselvis': ('vekselvis',),
'vel': ('vel',),
'vibrato': ('vibrato',),
'vice versa': ('vice versa',),
'vide': ('vide',),
'viden': ('viden',),
'vinterstid': ('vinterstid',),
'viselig': ('viselig',),
'visselig': ('visselig',),
'visst': ('visst',),
'visst nok': ('visst nok',),
'visstnok': ('visstnok',),
'vivace': ('vivace',),
'vonlig': ('vonlig',),
'vonom': ('vonom',),
'vonoms': ('vonoms',),
'vrangsøles': ('vrangsøles',),
'ytterlig': ('ytterlig',),
'åkkesom': ('åkkesom',),
'årevis': ('årevis',),
'årlig års': ('årlig års',),
'åssen': ('åssen',),
'ørende': ('ørende',),
'øyensynlig': ('øyensynlig',),
'antageligvis': ('antageligvis',),
'coolly': ('coolly',),
'kor': ('kor',),
'korfor': ('korfor',),
'kor': ('kor',),
'korfor': ('korfor',),
'medels': ('medels',),
'nasegrus': ('nasegrus',),
'overimorgen': ('overimorgen',),
'unntagelsesvis': ('unntagelsesvis',),
'åffer': ('åffer',),
'åffer': ('åffer',),
'sist': ('sist',),
'seinhaustes': ('seinhaustes',),
'stetse': ('stetse',),
'stikk': ('stikk',),
'storlig': ('storlig',),
'A': ('A',),
'for': ('for',),
'benveges': ('benveges',),
'bunkevis': ('bunkevis',),
'selv': ('selv',),
'sjøl': ('sjøl',),
'skauleies': ('skauleies',),
'da capo': ('da capo',),
'beint frem': ('beint frem',),
'beintfrem': ('beintfrem',),
'beinveges': ('beinveges',),
'beinvegs': ('beinvegs',),
'beinveis': ('beinveis',),
'benvegs': ('benvegs',),
'benveis': ('benveis',),
'en garde': ('en garde',),
'framåt': ('framåt',),
'krittende': ('krittende',),
'kvivitt': ('kvivitt',),
'maksis': ('maksis',),
'mangesteds': ('mangesteds',),
'møkka': ('møkka',),
'pill': ('pill',),
'sellende': ('sellende',),
'sirka': ('sirka',),
'subito': ('subito',),
'til sammen': ('til sammen',),
'tomrepes': ('tomrepes',),
'medurs': ('medurs',),
'moturs': ('moturs',)
}
| [
"ines@ines.io"
] | ines@ines.io |
7d2c62c8741ade915952b7fbf0f4c30ee5fa5b0f | ce90676fd0867aced31b86cb6b05db1b5f0f3828 | /random/tst234.py | cac1d82fe7ef0ebacd1700f1777d040e9c14b528 | [] | no_license | KoliosterNikolayIliev/Python_101_NI_Solutions | 9733c1e10967468fe48fc752532bcefacdebcfa9 | 2b878796bbdeff33590036560c0188c72f8fdb3f | refs/heads/main | 2023-08-11T01:08:19.482456 | 2021-10-03T17:33:32 | 2021-10-03T17:33:32 | 369,302,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,351 | py | import requests
import uuid
headers = {'Authorization': 'Token 201ad808f1e2dd3136777f56db2568a08fbfc219'}
# returns json of all banks in the given country
def get_banks_by_country(country):
    """Return the decoded JSON list of all banks (ASPSPs) in the given country."""
    url = f'https://ob.nordigen.com/api/aspsps/?country={country}'
    return requests.get(url, headers=headers).json()
# returns the bank with the given id
def get_bank_by_id(bank_id):
    """Return the decoded JSON record of the bank with the given id."""
    url = f'https://ob.nordigen.com/api/aspsps/{bank_id}'
    return requests.get(url, headers=headers).json()
def create_end_user_agreement(max_historical_days, enduser_id, aspsp_id):
    """Create an end-user agreement with a custom transaction-history length.

    Use this step only to specify how much transaction history to retrieve;
    if skipped, 90 days of history is retrieved by default.

    :param max_historical_days: length of the transaction history to retrieve
        (default on the Nordigen side is 90 days)
    :param enduser_id: unique end-user ID of someone using your services,
        usually a UUID
    :param aspsp_id: id of a bank
    """
    payload = {
        'max_historical_days': max_historical_days,
        'enduser_id': enduser_id,
        'aspsp_id': aspsp_id,
    }
    return requests.post(
        'https://ob.nordigen.com/api/agreements/enduser/',
        headers=headers,
        data=payload,
    ).json()
def create_requisition(enduser_id, reference, redirect, agreements, user_language=''):
    """Create a requisition: a collection of inputs for creating links and
    retrieving accounts.

    :param enduser_id: if you made a user agreement, this must match the id
        used for that agreement
    :param reference: an additional layer of unique ID defined by you
    :param redirect: URL the end user is redirected to after finishing
        authentication in the ASPSP
    :param agreements: array of user-agreement ID(s), or an empty array if
        no agreement was created
    :param user_language: optional
    """
    payload = dict(
        enduser_id=enduser_id,
        reference=reference,
        redirect=redirect,
        agreements=agreements,
        user_language=user_language,
    )
    return requests.post(
        'https://ob.nordigen.com/api/requisitions/',
        headers=headers,
        data=payload,
    ).json()
# this is will build a link for authentication in ASPSP
def build_link(requisition_id, aspsp_id):
    """Build the link used for end-user authentication in the ASPSP."""
    endpoint = f'https://ob.nordigen.com/api/requisitions/{requisition_id}/links/'
    response = requests.post(endpoint, headers=headers, data={'aspsp_id': aspsp_id})
    return response.json()
# the user's bank accounts can be listed. Pass the requisition ID to view the accounts.
def list_accounts(requisition_id):
    """Return the requisition details, which include the user's bank account ids."""
    endpoint = f'https://ob.nordigen.com/api/requisitions/{requisition_id}/'
    return requests.get(endpoint, headers=headers).json()
"""
How to use nordigen api:
step 1: Get Access Token - https://ob.nordigen.com/
step 2: Choose a Bank - use get_banks_by_country() function to chose available banks.
step 3: Create an end-user agreement - (optional) if you want more than 90 transaction history days
use create_end_user_agreement() function
step 4: Create a requisition - user create_requisition function
step 5: Build a Link - when you created requisition you can build a link for authentication in ASPSP use
build_link() function
step 6: Access accounts - when you connected an account when you use list_accounts() function with the requisition_id
that you created you should see a accounts id's
step 7: Now when you connected an bank account you can use the functions bellow to get the data you need.
"""
def get_account_metadata(account_id):
    """Fetch the metadata record for a single connected account."""
    endpoint = f'https://ob.nordigen.com/api/accounts/{account_id}/'
    return requests.get(endpoint, headers=headers).json()
def get_account_balances(account_id):
    """Fetch the balances of a single connected account."""
    endpoint = f'https://ob.nordigen.com/api/accounts/{account_id}/balances/'
    return requests.get(endpoint, headers=headers).json()
def get_account_details(account_id):
    """Fetch the details of a single connected account."""
    endpoint = f'https://ob.nordigen.com/api/accounts/{account_id}/details/'
    return requests.get(endpoint, headers=headers).json()
def get_account_transactions(account_id):
    """Fetch the transaction list of a single connected account."""
    endpoint = f'https://ob.nordigen.com/api/accounts/{account_id}/transactions/'
    return requests.get(endpoint, headers=headers).json()
"65191727+KoliosterNikolayIliev@users.noreply.github.com"
] | 65191727+KoliosterNikolayIliev@users.noreply.github.com |
0e508934c548e7e866977dbb13a82672b6180f94 | 024316d7672c7c2b2c558003c586df5116c73731 | /wavesynlib/fileutils/__init__.py | a476a38b6e9e90ed2515e926198448c35366f8d1 | [] | no_license | kaizhongkaizhong/WaveSyn | c305b03e44a961892a792a49601f625a90ae4f70 | b7918af5f66dba8c0d63cbb986465febe075cec2 | refs/heads/master | 2023-04-23T00:20:44.134876 | 2021-05-03T07:43:10 | 2021-05-03T07:43:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 369 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Aug 28 00:38:46 2016
@author: Feng-cong Li
"""
import os
from os.path import dirname, join
import hy
# Import the Hy-implemented helpers; if importing the Hy module raises a
# compiler error, precompile hyutils.hy explicitly and retry the import.
try:
    from wavesynlib.fileutils.hyutils import *
except hy.errors.HyCompilerError:
    # The import-time Hy compilation failed -- presumably a stale or broken
    # bytecode/compile state (TODO confirm). Invoke the `hyc` command-line
    # compiler on the .hy source, then attempt the wildcard import again.
    utils_path = join(dirname(__file__), 'hyutils.hy')
    os.system(f'hyc {utils_path}')
    from wavesynlib.fileutils.hyutils import *
| [
"xialulee@live.cn"
] | xialulee@live.cn |
f27d0a091eba208fe96474ee4959effa93451745 | a85ce270c8c67ab8a8c1bea577c4f8a0a054f8bf | /.venv/bin/jupyter-nbconvert | b7eb233280e1039b38407259910ad9d30b5b112b | [] | no_license | MohammedGhafri/data_visualization | afe2496100a5d204dcae3a8dd13bea51fe8f3c7c | c10c5dd8d1c687c5cf1f402b48178a3413c5abe1 | refs/heads/master | 2022-12-10T16:55:16.629174 | 2020-09-02T19:39:33 | 2020-09-02T19:39:33 | 292,341,273 | 0 | 0 | null | 2020-09-02T19:39:34 | 2020-09-02T16:52:04 | Python | UTF-8 | Python | false | false | 254 | #!/home/ghafri/data_visualization/.venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from nbconvert.nbconvertapp import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"eng.m.ghafri@gmail.com"
] | eng.m.ghafri@gmail.com | |
e239870c1886ba30562c9a92b0c9771ad29f59c4 | 76f4c947d5259bd8b3060fdb559b98720c670cae | /django_custom_user_model/django_custom_user_model/settings.py | ad8915fcc0ed18c9e600b175190cc90aab81ea63 | [
"MIT"
] | permissive | zkan/django-custom-user-model | 49de78f25af6c9324313aeea067a361778b5b225 | 3cb5937444b2c7a4f9f8a30621e8f2ed680dca1f | refs/heads/master | 2020-03-22T04:51:03.362817 | 2018-07-03T07:29:27 | 2018-07-03T07:29:27 | 139,525,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,142 | py | """
Django settings for django_custom_user_model project.
Generated by 'django-admin startproject' using Django 2.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is hard-coded and committed; for any real
# deployment it should be read from the environment instead.
SECRET_KEY = 'd6oy1n7!7v#7y!asiy7l1wujuhz8n)_4b+k_v*x*4d$pcr6u&n'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []


# Application definition
# NOTE(review): the project name suggests a custom user model, but no
# project app is listed here and AUTH_USER_MODEL is not set — confirm
# both are defined elsewhere before relying on these settings.

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'django_custom_user_model.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'django_custom_user_model.wsgi.application'


# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
# Default SQLite database stored alongside the project; fine for
# development only.

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}


# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/

STATIC_URL = '/static/'
"kan@prontomarketing.com"
] | kan@prontomarketing.com |
abeffd04fd1e1951cd2f585cfac2f1bbae933fdc | f8c3c677ba536fbf5a37ac4343c1f3f3acd4d9b6 | /ICA_SDK/ICA_SDK/models/object_store_access.py | 2ed6681b5101d31413bd507ed8012a758430dcd6 | [] | no_license | jsialar/integrated_IAP_SDK | 5e6999b0a9beabe4dfc4f2b6c8b0f45b1b2f33eb | c9ff7685ef0a27dc4af512adcff914f55ead0edd | refs/heads/main | 2023-08-25T04:16:27.219027 | 2021-10-26T16:06:09 | 2021-10-26T16:06:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,883 | py | # coding: utf-8
"""
IAP Services
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from ICA_SDK.configuration import Configuration
# Generated OpenAPI model: credentials and session id for an upload session.
# Kept byte-identical (the generator forbids manual edits); only comments
# are added here.
class ObjectStoreAccess(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps each python attribute to its declared OpenAPI type.
    openapi_types = {
        'aws_s3_temporary_upload_credentials': 'AwsS3TemporaryUploadCredentials',
        'direct_upload_credentials': 'DirectUploadCredentials',
        'session_id': 'str'
    }

    # Maps each python attribute to the camelCase key used in the JSON wire
    # format.
    attribute_map = {
        'aws_s3_temporary_upload_credentials': 'awsS3TemporaryUploadCredentials',
        'direct_upload_credentials': 'directUploadCredentials',
        'session_id': 'sessionId'
    }

    def __init__(self, aws_s3_temporary_upload_credentials=None, direct_upload_credentials=None, session_id=None, local_vars_configuration=None):  # noqa: E501
        """ObjectStoreAccess - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._aws_s3_temporary_upload_credentials = None
        self._direct_upload_credentials = None
        self._session_id = None
        self.discriminator = None

        # All fields are optional; only explicitly supplied values are set.
        if aws_s3_temporary_upload_credentials is not None:
            self.aws_s3_temporary_upload_credentials = aws_s3_temporary_upload_credentials
        if direct_upload_credentials is not None:
            self.direct_upload_credentials = direct_upload_credentials
        if session_id is not None:
            self.session_id = session_id

    @property
    def aws_s3_temporary_upload_credentials(self):
        """Gets the aws_s3_temporary_upload_credentials of this ObjectStoreAccess.  # noqa: E501


        :return: The aws_s3_temporary_upload_credentials of this ObjectStoreAccess.  # noqa: E501
        :rtype: AwsS3TemporaryUploadCredentials
        """
        return self._aws_s3_temporary_upload_credentials

    @aws_s3_temporary_upload_credentials.setter
    def aws_s3_temporary_upload_credentials(self, aws_s3_temporary_upload_credentials):
        """Sets the aws_s3_temporary_upload_credentials of this ObjectStoreAccess.


        :param aws_s3_temporary_upload_credentials: The aws_s3_temporary_upload_credentials of this ObjectStoreAccess.  # noqa: E501
        :type: AwsS3TemporaryUploadCredentials
        """

        self._aws_s3_temporary_upload_credentials = aws_s3_temporary_upload_credentials

    @property
    def direct_upload_credentials(self):
        """Gets the direct_upload_credentials of this ObjectStoreAccess.  # noqa: E501


        :return: The direct_upload_credentials of this ObjectStoreAccess.  # noqa: E501
        :rtype: DirectUploadCredentials
        """
        return self._direct_upload_credentials

    @direct_upload_credentials.setter
    def direct_upload_credentials(self, direct_upload_credentials):
        """Sets the direct_upload_credentials of this ObjectStoreAccess.


        :param direct_upload_credentials: The direct_upload_credentials of this ObjectStoreAccess.  # noqa: E501
        :type: DirectUploadCredentials
        """

        self._direct_upload_credentials = direct_upload_credentials

    @property
    def session_id(self):
        """Gets the session_id of this ObjectStoreAccess.  # noqa: E501

        The id of the upload session  # noqa: E501

        :return: The session_id of this ObjectStoreAccess.  # noqa: E501
        :rtype: str
        """
        return self._session_id

    @session_id.setter
    def session_id(self, session_id):
        """Sets the session_id of this ObjectStoreAccess.

        The id of the upload session  # noqa: E501

        :param session_id: The session_id of this ObjectStoreAccess.  # noqa: E501
        :type: str
        """

        self._session_id = session_id

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models, lists and dicts.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ObjectStoreAccess):
            return False

        # Equality is defined structurally over the serialized dict.
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, ObjectStoreAccess):
            return True

        return self.to_dict() != other.to_dict()
"siajunren@gmail.com"
] | siajunren@gmail.com |
c066233bb8db9f3c120392b1b16cc9ce30cc4375 | fa5c0d0bc7dc3a18be3350f1b7da2068e0362afb | /duanping/save.py | 0f13959f99a200b0783a794d2a1b570a9305f1a4 | [] | no_license | earthloong/Douban_spiders | f2f1a368e97364456b226b0382817768248fabfc | 58af80e7a84d5ab970822c1ae68fbd6772e03084 | refs/heads/master | 2020-04-28T16:25:34.778998 | 2019-02-02T04:02:17 | 2019-02-02T04:02:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,491 | py | # -*- coding: utf-8 -*-
# 此程序用来抓取 的数据
import os
import csv
import json
import sys
from save_data import database
class Spider(object):
    """Loads scraped comment rows from CSV dumps and writes them to SQL.

    Python 2 code: uses print statements and byte-string decoding.
    """

    def __init__(self):
        # Database handle; ``database`` comes from save_data.
        self.db = database()

    def get_data(self): # read every *data_DB.csv file in the working directory
        """Return a list of dicts, one per 11-field CSV row."""
        results = []
        paths = os.listdir(os.getcwd())
        for path in paths:
            if 'data_DB.csv' in path:
                with open(path, 'rU') as f:
                    tmp = csv.reader(f)
                    for i in tmp:
                        # print 'i:',i
                        # Rows are gbk-encoded bytes; undecodable bytes are
                        # dropped ('ignore').
                        t = [x.decode('gbk', 'ignore') for x in i]
                        # print 't:',t
                        # Only complete rows (exactly 11 fields) are kept.
                        if len(t) == 11:
                            dict_item = {'product_number': t[0],
                                         'plat_number': t[1],
                                         'nick_name': t[2],
                                         'cmt_date': t[3],
                                         'cmt_time': t[4],
                                         'comments': t[5],
                                         'like_cnt': t[6],
                                         'cmt_reply_cnt': t[7],
                                         'long_comment': t[8],
                                         'last_modify_date': t[9],
                                         'src_url': t[10]}
                            results.append(dict_item)
                        else:
                            print '少字段>>>t:',t
        return results

    def save_sql(self, table_name): # insert every collected row into SQL
        """Insert each row into ``table_name``; failures are logged and skipped."""
        items = self.get_data()
        all = len(items)
        count = 1
        for item in items:
            try:
                print 'count:%d | all:%d' % (count, all)
                count += 1
                self.db.up_data(table_name, item)
            except Exception as e:
                # Log the failed insert and continue with the next row.
                print '插入数据库错误>>>',e
                pass
if __name__ == "__main__":
spider = Spider()
spider = Spider()
print u'开始录入数据'
spider.save_sql('T_COMMENTS_PUB_MOVIE') # 手动输入库名
print u'录入完毕'
spider.db.db.close()
| [
"492741071@qq.com"
] | 492741071@qq.com |
59e14cb31e210da2525806ce609826785c4b60fd | f47d52330c2f53e8bc3086c23854d3022b802866 | /split_coco.py | dc773dd40c59242d23e4433ffcdf34a4a8f41edf | [] | no_license | Qidian213/GigaVersion | 45dd31d8209d79e3b77a891a19cd6c2bbb5e2683 | 2244e3a60be800b7fbe1fde28fb10be51890ce0a | refs/heads/main | 2023-06-30T18:16:23.232348 | 2021-08-02T01:08:01 | 2021-08-02T01:08:01 | 391,779,746 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,910 | py |
import pycocotools.coco as coco
import json
import numpy as np
import cv2
import shutil
import random
# Split the images/annotations of VISIBLE_COCO.json into train and val COCO
# files, according to the image-name lists stored in train_val.json.
#
# Fixes over the previous version: the no-op ``img_info['file_name'] =
# img_info['file_name']`` assignments are gone, the duplicated train/val
# branches are merged, file handles are closed via ``with``, and the
# membership test uses a set instead of a list.
with open('train_val.json', 'r') as f:
    train_val_list = json.load(f)
train_names = set(train_val_list['train'])  # anything else goes to 'val'

coco_data = coco.COCO('VISIBLE_COCO.json')
categories = coco_data.dataset['categories']
print(categories)

# Per-split accumulators for the re-numbered images and annotations.
splits = {
    'train': {'images': [], 'annotations': []},
    'val': {'images': [], 'annotations': []},
}

img_num = 0  # new image ids are consecutive, starting at 1
ann_num = 0  # new annotation ids are consecutive, starting at 0
for img_id in coco_data.getImgIds():
    img_num += 1
    img_info = coco_data.loadImgs(ids=[img_id])[0]
    img_anns = coco_data.loadAnns(ids=coco_data.getAnnIds(imgIds=[img_id]))

    # The split is decided by the base file name before the '__' suffix.
    base_name = img_info['file_name'].split('__')[0]
    split = 'train' if base_name in train_names else 'val'

    img_info['id'] = img_num
    splits[split]['images'].append(img_info)
    for ann in img_anns:
        ann['image_id'] = img_num
        ann['id'] = ann_num
        ann_num += 1
        splits[split]['annotations'].append(ann)

for split, out_name in (('train', 'visible_bbox_train.json'),
                        ('val', 'visible_bbox_val.json')):
    data_coco = {
        'images': splits[split]['images'],
        'categories': categories,
        'annotations': splits[split]['annotations'],
    }
    with open(out_name, 'w') as f:
        json.dump(data_coco, f, indent=4)
| [
"xhx1247786632@gmail.com"
] | xhx1247786632@gmail.com |
c8666866110c40d9d6ea8c980dfedb9f87daa040 | 0444918f75705bdfa177b45fdf8b903c6b63ab88 | /examples/dymoscale_simpletest.py | c9808bdccd88959f6205aae602a08224eb1f3e49 | [
"MIT"
] | permissive | ntoll/Adafruit_CircuitPython_DymoScale | 80785aba4a67a5ab5e533b75d05c968a999d0d5e | c57e45659650bf4ffb2b33eaea7dc462f6c63cbf | refs/heads/master | 2020-07-17T16:26:21.503499 | 2019-09-03T10:48:55 | 2019-09-03T10:48:55 | 206,054,402 | 0 | 0 | MIT | 2019-09-03T10:47:33 | 2019-09-03T10:47:33 | null | UTF-8 | Python | false | false | 639 | py | import time
import board
import digitalio
import adafruit_dymoscale
# initialize the dymo scale
units_pin = digitalio.DigitalInOut(board.D3)
units_pin.switch_to_output()
dymo = adafruit_dymoscale.DYMOScale(board.D4, units_pin)
# take a reading of the current time
time_stamp = time.monotonic()
while True:
reading = dymo.weight
text = "{} g".format(reading.weight)
print(text)
# to avoid sleep mode, toggle the units pin every 2 mins.
if (time.monotonic() - time_stamp) > 120:
print('toggling units button...')
dymo.toggle_unit_button()
# reset the time
time_stamp = time.monotonic()
| [
"robots199@me.com"
] | robots199@me.com |
68002fbdfd606d9c99b4eaa029b17e29c4aed4f9 | 5f404180423f854df798ea907fd13094f1eccfae | /tests/test_tutorial_filter.py | 14e85a66403ea7c8c5236dd1e7e14af866f9428c | [
"MIT"
] | permissive | sdpython/td3a_cpp | e776bceb65285eca7f9f0400fb5f96cd8bf2393e | 1ca08907be03a09bb9cb89b2ca334b1fa5305648 | refs/heads/master | 2023-05-11T06:18:00.968817 | 2023-04-30T09:15:14 | 2023-04-30T09:15:14 | 226,640,683 | 1 | 7 | NOASSERTION | 2021-10-21T22:46:49 | 2019-12-08T09:05:21 | Cython | UTF-8 | Python | false | false | 2,262 | py | """
Unit tests for ``random_strategy``.
"""
import unittest
import numpy
from numpy.testing import assert_equal
from td3a_cpp.tutorial.experiment_cython import (
pyfilter_dmax, filter_dmax_cython,
filter_dmax_cython_optim,
cyfilter_dmax,
cfilter_dmax, cfilter_dmax2, cfilter_dmax16,
cfilter_dmax4
)
class TestTutorialFilter(unittest.TestCase):
    """Checks every filter implementation against a numpy reference.

    Every implementation under test clamps, in place, the values of a
    float64 vector that exceed a threshold (here 0) down to that threshold.
    The previous version repeated the same 5-line body nine times; the
    shared scenario now lives in a single private helper.
    """

    def _check_filter(self, filter_fn):
        # Apply the filter in place and compare with the equivalent numpy
        # masked assignment on a copy.
        va = numpy.random.randn(100).astype(numpy.float64)
        vb = va.copy()
        filter_fn(va, 0)
        vb[vb > 0] = 0
        assert_equal(va, vb)

    def test_pyfilter_dmax(self):
        self._check_filter(pyfilter_dmax)

    def test_filter_dmax_cython(self):
        self._check_filter(filter_dmax_cython)

    def test_filter_dmax_cython_optim(self):
        self._check_filter(filter_dmax_cython_optim)

    def test_filter_cyfilter_dmax(self):
        self._check_filter(cyfilter_dmax)

    def test_filter_cfilter_dmax(self):
        self._check_filter(cfilter_dmax)

    def test_filter_cfilter_dmax2(self):
        self._check_filter(cfilter_dmax2)

    def test_filter_cfilter_dmax16(self):
        self._check_filter(cfilter_dmax16)

    def test_filter_cfilter_dmax4(self):
        self._check_filter(cfilter_dmax4)

    def test_cfilter_dmax(self):
        # NOTE(review): duplicates test_filter_cfilter_dmax; kept so the
        # externally visible set of test names is unchanged.
        self._check_filter(cfilter_dmax)
if __name__ == '__main__':
unittest.main()
| [
"xavier.dupre@gmail.com"
] | xavier.dupre@gmail.com |
fe8a4ba65c8e91aefd4f7691f21ea64334745863 | dd87194dee537c2291cf0c0de809e2b1bf81b5b2 | /k8sclient/models/v1beta1_scale_spec.py | 57cf26a08a700d346cc6e98651b1499c8acf638a | [
"Apache-2.0"
] | permissive | Arvinhub/client-python | 3ea52640ab02e4bf5677d0fd54fdb4503ecb7768 | d67df30f635231d68dc4c20b9b7e234c616c1e6a | refs/heads/master | 2023-08-31T03:25:57.823810 | 2016-11-02T22:44:36 | 2016-11-02T22:44:36 | 73,865,578 | 1 | 0 | Apache-2.0 | 2018-10-10T12:16:45 | 2016-11-15T23:47:17 | Python | UTF-8 | Python | false | false | 3,582 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: unversioned
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
# Generated Kubernetes API model for a Scale subresource spec.  Kept
# byte-identical (the generator forbids manual edits); only comments added.
class V1beta1ScaleSpec(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, replicas=None):
        """
        V1beta1ScaleSpec - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        # Single optional field: the desired replica count.
        self.swagger_types = {
            'replicas': 'int'
        }

        self.attribute_map = {
            'replicas': 'replicas'
        }

        self._replicas = replicas

    @property
    def replicas(self):
        """
        Gets the replicas of this V1beta1ScaleSpec.
        desired number of instances for the scaled object.

        :return: The replicas of this V1beta1ScaleSpec.
        :rtype: int
        """
        return self._replicas

    @replicas.setter
    def replicas(self, replicas):
        """
        Sets the replicas of this V1beta1ScaleSpec.
        desired number of instances for the scaled object.

        :param replicas: The replicas of this V1beta1ScaleSpec.
        :type: int
        """

        self._replicas = replicas

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # Recursively serialize nested models, lists and dicts.
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # NOTE(review): structural equality via __dict__; assumes ``other``
        # is a compatible model instance.
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| [
"mehdy@google.com"
] | mehdy@google.com |
a3717d40f9bcfd31bd41b77ef503e38bca83308a | 4e26d797d72678a1c14ee59522964013eef3d551 | /usuarios/admin.py | 2517a9a6bcbd0522246cf577f8e40c0106d80d1a | [] | no_license | GomesMilla/SistemaDeControle | 3def1f47793b28317b2462dc61098145c6329588 | b9e2aad12bfaa8858ea45aa9adfc3c0f879a45e8 | refs/heads/main | 2023-03-15T05:25:23.606211 | 2021-03-23T19:19:10 | 2021-03-23T19:19:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | from django.contrib import admin
from usuarios.models import Usuario
admin.site.register(Usuario)
# Register your models here.
| [
"camila.adriana.gomes@outlook.com"
] | camila.adriana.gomes@outlook.com |
375e994b7829514a5c1fcb79ec62436a0367d65f | e0b50a8ff40097b9896ad69d098cbbbbe4728531 | /dcmanager/api/app.py | fca85673325aade81b61debc60690625e0a23c64 | [
"Apache-2.0"
] | permissive | aleks-kozyrev/stx-distcloud | ccdd5c76dd358b8aa108c524138731aa2b0c8a53 | a4cebb85c45c8c5f1f0251fbdc436c461092171c | refs/heads/master | 2020-03-27T11:11:09.348241 | 2018-08-27T14:33:50 | 2018-08-27T14:33:50 | 146,470,708 | 0 | 0 | Apache-2.0 | 2018-08-28T15:47:12 | 2018-08-28T15:47:08 | Python | UTF-8 | Python | false | false | 2,808 | py | # Copyright (c) 2015 Huawei, Tech. Co,. Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2017 Wind River Systems, Inc.
#
# The right to copy, distribute, modify, or otherwise make use
# of this software may be licensed only pursuant to the terms
# of an applicable Wind River license agreement.
#
import pecan
from keystonemiddleware import auth_token
from oslo_config import cfg
from oslo_middleware import request_id
from oslo_service import service
from dcmanager.common import context as ctx
from dcmanager.common.i18n import _
def setup_app(*args, **kwargs):
    """Build and return the Pecan WSGI application for the dcmanager API.

    Server host/port and the pecan debug/auth options are taken from
    oslo.config (``cfg.CONF``); the resulting app is wrapped by
    ``_wrap_app`` (request-id and, optionally, keystone auth middleware).
    """
    opts = cfg.CONF.pecan
    config = {
        'server': {
            'port': cfg.CONF.bind_port,
            'host': cfg.CONF.bind_host
        },
        'app': {
            'root': 'dcmanager.api.controllers.root.RootController',
            'modules': ['dcmanager.api'],
            "debug": opts.debug,
            "auth_enable": opts.auth_enable,
            'errors': {
                400: '/error',
                '__force_dict__': True
            }
        }
    }

    pecan_config = pecan.configuration.conf_from_dict(config)

    # app_hooks = [], hook collection will be put here later
    app = pecan.make_app(
        pecan_config.app.root,
        debug=False,
        wrap_app=_wrap_app,
        force_canonical=False,
        hooks=lambda: [ctx.AuthHook()],
        guess_content_type_from_ext=True
    )

    return app
def _wrap_app(app):
    """Wrap the WSGI app with request-id and, optionally, keystone auth."""
    wrapped = request_id.RequestId(app)

    use_keystone = (cfg.CONF.pecan.auth_enable
                    and cfg.CONF.auth_strategy == 'keystone')
    if not use_keystone:
        return wrapped

    auth_conf = dict(cfg.CONF.keystone_authtoken)
    # Change auth decisions of requests to the app itself.
    auth_conf['delay_auth_decision'] = True
    # NOTE: Policy enforcement works only if Keystone
    # authentication is enabled. No support for other authentication
    # types at this point.
    return auth_token.AuthProtocol(wrapped, auth_conf)
_launcher = None
def serve(api_service, conf, workers=1):
    """Launch the API service with ``workers`` worker processes.

    May only be called once per process; the module-level ``_launcher``
    acts as the guard.

    :raises RuntimeError: if the service was already launched.
    """
    global _launcher
    if _launcher:
        raise RuntimeError(_('serve() can only be called once'))

    _launcher = service.launch(conf, api_service, workers=workers)
def wait():
    # Block until the previously launched service (see serve()) exits.
    _launcher.wait()
| [
"scott.little@windriver.com"
] | scott.little@windriver.com |
b6a06559fa2bd1acd77a3728d501f4b66cbc9581 | 63f0ca44a91c1c4eed7eb2b255b9431c54ad931e | /util/metrics/entropy.py | a09dd152fb755a60dc8efe0ca5afff6e560f74df | [
"Apache-2.0"
] | permissive | jamesoneill12/LayerFusion | e4685e2a54467d6c4dc02022b97af5da2c429aa7 | 99cba1030ed8c012a453bc7715830fc99fb980dc | refs/heads/main | 2023-08-18T04:45:16.662884 | 2021-09-26T18:12:48 | 2021-09-26T18:12:48 | 410,594,160 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 21,980 | py | # -*- coding: utf-8 -*-
import os
import sys
from scipy import stats
import torch
from torch import nn
import scipy.spatial as ss
from scipy.special import digamma
from math import log
import numpy as np
import random
import warnings
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))
testdir = os.path.dirname(__file__)
def symbolize(X, m):
"""
Converts numeric values of the series to a symbolic version of it based
on the m consecutive values.
Parameters
----------
X : Series to symbolize.
m : length of the symbolic subset.
Returns
----------
List of symbolized X
"""
X = np.array(X)
if m >= len(X):
raise ValueError("Length of the series must be greater than m")
dummy = []
for i in range(m):
l = np.roll(X, -i)
dummy.append(l[:-(m - 1)])
dummy = np.array(dummy)
symX = []
for mset in dummy.T:
rank = stats.rankdata(mset, method="min")
symbol = np.array2string(rank, separator="")
symbol = symbol[1:-1]
symX.append(symbol)
return symX
def symbolic_mutual_information(symX, symY):
    """
    Symbolic mutual information between the series symX and symY.

    Probabilities are estimated by relative frequency; the logarithm is
    taken in base ``len(symbols)``, the size of the combined alphabet of
    both series.

    Parameters
    ----------
    symX : Symbolic series X.
    symY : Symbolic series Y.

    Returns
    ----------
    Value for mutual information
    """
    if len(symX) != len(symY):
        raise ValueError('All arrays must have same length')

    symX = np.array(symX)
    symY = np.array(symY)

    alphabet_size = len(np.unique(np.concatenate((symX, symY))))
    log_base = np.log(alphabet_size)

    joint = symbolic_joint_probabilities(symX, symY)
    marg_x = symbolic_probabilities(symX)
    marg_y = symbolic_probabilities(symY)

    # Sum p(x,y) * log( p(x,y) / (p(x) p(y)) ) over the observed pairs only;
    # iterating the joint table directly skips the (x, y) combinations that
    # never co-occur.
    mi = 0
    for y, row in joint.items():
        for x, p_xy in row.items():
            mi += p_xy * np.log(p_xy / (marg_x[x] * marg_y[y])) / log_base

    return mi
def symbolic_transfer_entropy(symX, symY):
    """
    Computes T(Y->X), the transfer of entropy from symbolic series Y to X.

    Parameters
    ----------
    symX : Symbolic series X.
    symY : Symbolic series Y.

    Returns
    ----------
    Transfer entropy value (in bits).
    """
    if len(symX) != len(symY):
        raise ValueError('All arrays must have same length')

    symX = np.array(symX)
    symY = np.array(symY)

    # p(x_{t+1} | x_t), p(x_{t+1} | x_t, y_t) and p(y_t, x_t, x_{t+1}).
    own_past = symbolic_conditional_probabilities_consecutive(symX)
    joint_past = symbolic_conditional_probabilities_consecutive_external(symX, symY)
    triple = symbolic_joint_probabilities_consecutive_external(symX, symY)

    log2 = np.log(2.)
    te = 0
    for y, by_x in triple.items():
        for x, by_next in by_x.items():
            for x_next, weight in by_next.items():
                try:
                    p_own = own_past[x][x_next]
                    p_cond = joint_past[y][x][x_next]
                except KeyError:
                    continue
                te += weight * np.log(p_cond / p_own) / log2

    return te
def symbolic_probabilities(symX):
    """
    Estimates p[s], the relative frequency of each symbol s in symX.

    Parameters
    ----------
    symX : Symbolic series X.

    Returns
    ----------
    Dict mapping each symbol to its probability.
    """
    symX = np.array(symX)
    total = len(symX)

    # Count occurrences first, then normalise once by the series length.
    counts = {}
    for symbol in symX:
        counts[symbol] = counts.get(symbol, 0) + 1

    return {symbol: c / float(total) for symbol, c in counts.items()}
def symbolic_joint_probabilities(symX, symY):
    """
    Estimates the joint probabilities M[y][x] = p(y, x) of co-occurrence of
    y in symY and x in symX at the same time index.

    Parameters
    ----------
    symX : Symbolic series X.
    symY : Symbolic series Y.

    Returns
    ----------
    Nested dict (outer key: symbol of Y, inner key: symbol of X).

    Raises
    ----------
    ValueError : if the series differ in length.
    """
    if len(symX) != len(symY):
        raise ValueError('All arrays must have same length')

    symX = np.array(symX)
    symY = np.array(symY)

    weight = 1.0 / len(symX)
    jp = {}
    for y, x in zip(symY, symX):
        row = jp.setdefault(y, {})
        row[x] = row.get(x, 0.0) + weight

    return jp
def symbolic_conditional_probabilities(symX, symY):
    """
    Estimates M[a][b] = p(b in symX | a in symY) at the same time index.

    Parameters
    ----------
    symX : Symbolic series X.
    symY : Symbolic series Y.

    Returns
    ----------
    Nested dict of conditional probabilities (outer key: Y symbol).

    Raises
    ----------
    ValueError : if the series differ in length.
    """
    if len(symX) != len(symY):
        raise ValueError('All arrays must have same length')

    symX = np.array(symX)
    symY = np.array(symY)

    # Count joint occurrences and per-condition totals, then normalise.
    counts = {}
    totals = {}
    for x, y in zip(symX, symY):
        row = counts.setdefault(y, {})
        row[x] = row.get(x, 0.0) + 1.0
        totals[y] = totals.get(y, 0) + 1

    return {y: {x: c / totals[y] for x, c in row.items()}
            for y, row in counts.items()}
def symbolic_conditional_probabilities_consecutive(symX):
    """
    Estimates M[A][B] = p(B immediately after A) within a single series.

    Parameters
    ----------
    symX : Symbolic series X.

    Returns
    ----------
    Nested dict of transition probabilities.
    """
    series = np.array(symX)
    # p(x_{t+1} | x_t): condition the shifted series on its predecessor.
    return symbolic_conditional_probabilities(series[1:], series[:-1])
def symbolic_double_conditional_probabilities(symX, symY, symZ):
    """
    Estimates M[y][z][x] = p(x | y, z) for symbols drawn from the three
    series at the same time index.

    Parameters
    ----------
    symX : Symbolic series X.
    symY : Symbolic series Y.
    symZ : Symbolic series Z.

    Returns
    ----------
    Nested dict of conditional probabilities.

    Raises
    ----------
    ValueError : if the series differ in length.
    """
    if (len(symX) != len(symY)) or (len(symY) != len(symZ)):
        raise ValueError('All arrays must have same length')

    symX = np.array(symX)
    symY = np.array(symY)
    symZ = np.array(symZ)

    # Count (x, y, z) triples and the (y, z) condition totals, then divide.
    counts = {}
    totals = {}
    for x, y, z in zip(symX, symY, symZ):
        cell = counts.setdefault(y, {}).setdefault(z, {})
        cell[x] = cell.get(x, 0.0) + 1.0
        by_z = totals.setdefault(y, {})
        by_z[z] = by_z.get(z, 0.0) + 1.0

    return {y: {z: {x: c / totals[y][z] for x, c in cell.items()}
                for z, cell in by_z.items()}
            for y, by_z in counts.items()}
def symbolic_conditional_probabilities_consecutive_external(symX, symY):
    """
    Estimates M[y_t][x_t][x_{t+1}] = p(x_{t+1} | x_t, y_t): the transition
    probability of X conditioned on its own past and the external series Y.

    Parameters
    ----------
    symX : Symbolic series X.
    symY : Symbolic series Y.

    Returns
    ----------
    Nested dict of conditional probabilities.

    Raises
    ----------
    ValueError : if the series differ in length.
    """
    if len(symX) != len(symY):
        raise ValueError('All arrays must have same length')

    x_series = np.array(symX)
    y_series = np.array(symY)
    return symbolic_double_conditional_probabilities(
        x_series[1:], y_series[:-1], x_series[:-1])
def symbolic_joint_probabilities_triple(symX, symY, symZ):
    """
    Estimates M[y][z][x] = p(y, z, x), the probability of co-occurrence of
    y, z and x at the same time index.

    Parameters
    ----------
    symX : Symbolic series X.
    symY : Symbolic series Y.
    symZ : Symbolic series Z.

    Returns
    ----------
    Nested dict of joint probabilities.

    Raises
    ----------
    ValueError : if the series differ in length.
    """
    if (len(symX) != len(symY)) or (len(symY) != len(symZ)):
        raise ValueError('All arrays must have same length')

    symX = np.array(symX)
    symY = np.array(symY)
    symZ = np.array(symZ)

    weight = 1.0 / len(symX)
    jp = {}
    for x, y, z in zip(symX, symY, symZ):
        cell = jp.setdefault(y, {}).setdefault(z, {})
        cell[x] = cell.get(x, 0.0) + weight

    return jp
def symbolic_joint_probabilities_consecutive_external(symX, symY):
    """
    Estimates M[y_t][x_t][x_{t+1}] = p(y_t, x_t, x_{t+1}): the joint
    probability of the external symbol, the current symbol of X and its
    successor.

    Parameters
    ----------
    symX : Symbolic series X.
    symY : Symbolic series Y.

    Returns
    ----------
    Nested dict of joint probabilities.

    Raises
    ----------
    ValueError : if the series differ in length.
    """
    if len(symX) != len(symY):
        raise ValueError('All arrays must have same length')

    x_series = np.array(symX)
    y_series = np.array(symY)
    return symbolic_joint_probabilities_triple(
        x_series[1:], y_series[:-1], x_series[:-1])
def tens2num(X):
    """Convert a torch tensor to a numpy array; other inputs pass through.

    Improvements over the previous version:
    - ``isinstance`` instead of ``type(X) == torch.Tensor`` so subclasses
      such as ``nn.Parameter`` are converted too;
    - ``detach()`` so tensors that require grad no longer raise on
      ``.numpy()``;
    - ``.cpu()`` is a no-op for host tensors, so no explicit ``is_cuda``
      check is needed.
    """
    if isinstance(X, torch.Tensor):
        X = X.detach().cpu().numpy()
    return X
def compute_te(X, Y):
    """for a 1d tensor: net transfer entropy T(X->Y) - T(Y->X) on
    3-symbol encodings of the two series (prints diagnostics)."""
    sym_x = symbolize(tens2num(X), 3)
    sym_y = symbolize(tens2num(Y), 3)
    print(sym_x)
    print(len(sym_x))
    print(len(sym_y))

    mutual_info = symbolic_mutual_information(sym_x, sym_y)
    te_xy = symbolic_transfer_entropy(sym_x, sym_y)
    te_yx = symbolic_transfer_entropy(sym_y, sym_x)
    net_te = te_yx - te_xy

    print("---------------------- Random Case ----------------------")
    print("Mutual Information = " + str(mutual_info))
    print("T(Y->X) = " + str(te_xy) + " T(X->Y) = " + str(te_yx))
    print("Transfer of Entropy = " + str(net_te))
    return net_te
def compute_te_net(net):
    """Transfer entropy between consecutive parameter tensors of ``net``.

    The first parameter is compared with itself; every later parameter is
    compared against the one that precedes it in ``named_parameters()``
    order.

    Returns a 1d float tensor with one TE value per parameter.
    """
    te_values = []
    previous = None
    for p_name, p in net.named_parameters():
        current = p.data.cpu().numpy()
        # The first layer has no predecessor, so compare it with itself.
        reference = current if previous is None else previous
        te_values.append(compute_te(reference, current))
        previous = current
    # BUG FIX: ``torch.cuda.Tensor`` (used previously) does not exist and
    # raised AttributeError.  Build a regular tensor instead; callers can
    # move it to the GPU with ``.cuda()`` when needed.
    return torch.tensor(te_values)
def test_te_net():
    """Smoke test: print TE values across the layers of a small MLP."""
    model = nn.Sequential(nn.Linear(100, 30), nn.Linear(30, 10), nn.Linear(10, 5))
    print(compute_te_net(model))
def main():
    """Demo: symbolic MI / transfer entropy on two scenarios.

    Prints results for (1) two independent random integer series and
    (2) a series Y that anticipates X by one step (Y = X rolled left).
    The two scenarios shared an identical report sequence, so it is
    factored into a private helper; the printed output is unchanged.
    """

    def _report(title, X, Y):
        # Symbolize both series (embedding dimension 3), then print the
        # symbolic MI and both directed transfer entropies.
        symX = symbolize(X, 3)
        symY = symbolize(Y, 3)
        MI = symbolic_mutual_information(symX, symY)
        TXY = symbolic_transfer_entropy(symX, symY)
        TYX = symbolic_transfer_entropy(symY, symX)
        TE = TYX - TXY
        print(title)
        print("Mutual Information = " + str(MI))
        print("T(Y->X) = " + str(TXY) + " T(X->Y) = " + str(TYX))
        print("Transfer of Entropy = " + str(TE))

    # Case 1: independent random series -> TE should be near zero.
    X = np.random.randint(10, size=3000)
    Y = np.random.randint(10, size=3000)
    # Uncomment this for an example of a time series (Y) clearly anticipating values of X
    # Y = np.roll(X,-1)
    _report("---------------------- Random Case ----------------------", X, Y)

    # Case 2: Y anticipates X by one step -> strong directed dependence.
    X = np.random.randint(10, size=3000)
    Y = np.roll(X, -1)
    _report("------------------ Y anticipates X Case -----------------", X, Y)
"""---------------- https://raw.githubusercontent.com/gregversteeg/NPEET/master/npeet/entropy_estimators.py ------------"""
#!/usr/bin/env python
# Written by Greg Ver Steeg
# See readme.pdf for documentation
# Or go to http://www.isi.edu/~gregv/npeet.html
# CONTINUOUS ESTIMATORS
def entropy(x, k=3, base=2):
    """Kozachenko-Leonenko k-nearest-neighbour continuous entropy estimator.

    x should be a list of vectors, e.g. x = [[1.3], [3.7], [5.1], [2.4]]
    if x is a one-dimensional scalar and we have four samples.
    """
    assert k <= len(x) - 1, "Set k smaller than num. samples - 1"
    samples = add_noise(np.asarray(x))
    n_samples, n_dims = samples.shape
    # Distance to each point's k-th nearest neighbour (max-norm).
    knn_dist = query_neighbors(ss.cKDTree(samples), samples, k)
    bias = digamma(n_samples) - digamma(k) + n_dims * log(2)
    return (bias + n_dims * np.log(knn_dist).mean()) / log(base)
def centropy(x, y, k=3, base=2):
    """Continuous conditional entropy H(X|Y) = H(X,Y) - H(Y), k-NN estimator."""
    joint = np.c_[x, y]
    h_joint = entropy(joint, k=k, base=base)
    h_y = entropy(y, k=k, base=base)
    return h_joint - h_y
def tc(xs, k=3, base=2):
    """Total correlation: sum of marginal entropies minus the joint entropy."""
    columns = np.expand_dims(xs, axis=0).T
    marginal_entropies = [entropy(column, k=k, base=base) for column in columns]
    return np.sum(marginal_entropies) - entropy(xs, k, base)
def ctc(xs, y, k=3, base=2):
    """Conditional total correlation of the columns of xs given y."""
    columns = np.expand_dims(xs, axis=0).T
    per_feature = [centropy(column, y, k=k, base=base) for column in columns]
    return np.sum(per_feature) - centropy(xs, y, k, base)
def corex(xs, ys, k=3, base=2):
    """Sum of per-column MI with ys minus the joint MI (CorEx-style score)."""
    columns = np.expand_dims(xs, axis=0).T
    per_feature = [mi(column, ys, k=k, base=base) for column in columns]
    return np.sum(per_feature) - mi(xs, ys, k=k, base=base)
def mi(x, y, z=None, k=3, base=2):
    """ Mutual information of x and y (conditioned on z if z is not None)
        x, y should be a list of vectors, e.g. x = [[1.3], [3.7], [5.1], [2.4]]
        if x is a one-dimensional scalar and we have four samples

        KSG-style estimator: count neighbours within the joint-space k-NN
        radius in each marginal space and combine digamma terms.
        NOTE(review): z, when given, is hstack'd alongside x and y, so it
        presumably must be a list of vectors of the same length — not
        validated here, and no noise is added to z (unlike x and y).
    """
    assert len(x) == len(y), "Arrays should have same length"
    assert k <= len(x) - 1, "Set k smaller than num. samples - 1"
    x, y = np.asarray(x), np.asarray(y)
    # Tiny noise breaks ties so k-NN distances are well defined.
    x = add_noise(x)
    y = add_noise(y)
    points = [x, y]
    if z is not None:
        points.append(z)
    points = np.hstack(points)
    # Find nearest neighbors in joint space, p=inf means max-norm
    tree = ss.cKDTree(points)
    dvec = query_neighbors(tree, points, k)
    if z is None:
        # Marginal neighbour counts in x and y within the joint radius.
        a, b, c, d = avgdigamma(x, dvec), avgdigamma(y, dvec), digamma(k), digamma(len(x))
    else:
        # Conditional MI: counts in the (x,z), (y,z) and z subspaces.
        xz = np.c_[x, z]
        yz = np.c_[y, z]
        a, b, c, d = avgdigamma(xz, dvec), avgdigamma(yz, dvec), avgdigamma(z, dvec), digamma(k)
    return (-a - b + c + d) / log(base)
def cmi(x, y, z, k=3, base=2):
    """Conditional mutual information of x and y given z.

    Thin legacy wrapper kept for backwards compatibility; prefer calling
    mi(x, y, z) directly.
    """
    return mi(x, y, k=k, base=base, z=z)
def kldiv(x, xp, k=3, base=2):
    """ KL Divergence between p and q for x~p(x), xp~q(x)
        x, xp should be a list of vectors, e.g. x = [[1.3], [3.7], [5.1], [2.4]]
        if x is a one-dimensional scalar and we have four samples

        k-NN divergence estimator comparing neighbour distances of x's
        points within its own sample and within xp's sample.
    """
    assert k < min(len(x), len(xp)), "Set k smaller than num. samples - 1"
    assert len(x[0]) == len(xp[0]), "Two distributions must have same dim."
    d = len(x[0])   # dimensionality
    n = len(x)      # samples from p
    m = len(xp)     # samples from q
    const = log(m) - log(n - 1)
    tree = ss.cKDTree(x)
    treep = ss.cKDTree(xp)
    nn = query_neighbors(tree, x, k)
    # k - 1 here: x's own points are present in `tree` (self-match at
    # distance 0) but absent from `treep`, so the comparable neighbour in
    # treep sits one column earlier.
    nnp = query_neighbors(treep, x, k - 1)
    return (const + d * (np.log(nnp).mean() - np.log(nn).mean())) / log(base)
# DISCRETE ESTIMATORS
def entropyd(sx, base=2):
    """Plug-in (maximum-likelihood) discrete entropy estimator.

    sx is a list of samples; rows are compared as whole values.
    """
    _, counts = np.unique(sx, return_counts=True, axis=0)
    # Normalise to empirical probabilities; float cast avoids integer division.
    probs = counts.astype(float) / len(sx)
    # Zero-probability bins contribute 0 * log(1/0) = 0, so drop them.
    probs = probs[probs > 0.0]
    return -np.sum(probs * np.log(probs)) / log(base)
def midd(x, y, base=2):
    """Discrete mutual information I(X;Y) = H(X) - H(X|Y).

    Samples may be any hashable objects.
    """
    assert len(x) == len(y), "Arrays should have same length"
    h_x = entropyd(x, base)
    h_x_given_y = centropyd(x, y, base)
    return h_x - h_x_given_y
def cmidd(x, y, z, base=2):
    """Discrete conditional mutual information I(X;Y|Z).

    Computed as H(X,Z) + H(Y,Z) - H(X,Y,Z) - H(Z).
    """
    assert len(x) == len(y) == len(z), "Arrays should have same length"
    h_xz = entropyd(np.c_[x, z], base)
    h_yz = entropyd(np.c_[y, z], base)
    h_xyz = entropyd(np.c_[x, y, z], base)
    h_z = entropyd(z, base)
    return h_xz + h_yz - h_xyz - h_z
def centropyd(x, y, base=2):
    """Discrete conditional entropy H(X|Y) = H(X,Y) - H(Y)."""
    joint = np.c_[x, y]
    return entropyd(joint, base) - entropyd(y, base)
def tcd(xs, base=2):
    """Discrete total correlation of the columns of xs."""
    columns = np.expand_dims(xs, axis=0).T
    marginal_entropies = [entropyd(column, base=base) for column in columns]
    return np.sum(marginal_entropies) - entropyd(xs, base)
def ctcd(xs, y, base=2):
    """Discrete conditional total correlation of xs given y."""
    columns = np.expand_dims(xs, axis=0).T
    per_feature = [centropyd(column, y, base=base) for column in columns]
    return np.sum(per_feature) - centropyd(xs, y, base)
def corexd(xs, ys, base=2):
    """Discrete CorEx score: per-column MI with ys minus the joint MI."""
    columns = np.expand_dims(xs, axis=0).T
    per_feature = [midd(column, ys, base=base) for column in columns]
    return np.sum(per_feature) - midd(xs, ys, base)
# MIXED ESTIMATORS
def micd(x, y, k=3, base=2, warning=True):
    """ If x is continuous and y is discrete, compute mutual information

        I(X;Y) = H(X) - H(X|Y), with H(X|Y) estimated by conditioning on
        each unique y value and weighting by its empirical probability.
        NOTE(review): `(y == yval).all(axis=1)` implies y is 2-D and x is a
        numpy array supporting boolean indexing — confirm against callers.
    """
    assert len(x) == len(y), "Arrays should have same length"
    entropy_x = entropy(x, k, base)
    y_unique, y_count = np.unique(y, return_counts=True, axis=0)
    y_proba = y_count / len(y)
    entropy_x_given_y = 0.
    for yval, py in zip(y_unique, y_proba):
        # Rows of x whose paired y equals this unique value.
        x_given_y = x[(y == yval).all(axis=1)]
        if k <= len(x_given_y) - 1:
            entropy_x_given_y += py * entropy(x_given_y, k, base)
        else:
            # Too few samples for a k-NN estimate in this slice: fall back
            # to the unconditional entropy (maximal-entropy assumption).
            if warning:
                warnings.warn("Warning, after conditioning, on y={yval} insufficient data. "
                              "Assuming maximal entropy in this case.".format(yval=yval))
            entropy_x_given_y += py * entropy_x
    return abs(entropy_x - entropy_x_given_y)  # units already applied
def midc(x, y, k=3, base=2, warning=True):
    """MI for discrete x / continuous y; MI is symmetric, so delegate to micd."""
    return micd(y, x, k=k, base=base, warning=warning)
def centropycd(x, y, k=3, base=2, warning=True):
    """Conditional entropy H(X|Y) for continuous x and discrete y.

    Uses H(X|Y) = H(X) - I(X;Y).  Bug fix: the original called
    entropy(x, base), which passed `base` into entropy's `k` parameter
    by position (entropy(x, k=3, base=2)); both k and base are now
    forwarded explicitly.
    """
    return entropy(x, k=k, base=base) - micd(x, y, k, base, warning)
def centropydc(x, y, k=3, base=2, warning=True):
    """H(X|Y) with discrete x and continuous y; mirror of centropycd."""
    swapped = centropycd(y, x, k=k, base=base, warning=warning)
    return swapped
def ctcdc(xs, y, k=3, base=2, warning=True):
    """Conditional total correlation of discrete xs given continuous y."""
    columns = np.expand_dims(xs, axis=0).T
    per_feature = [centropydc(column, y, k=k, base=base, warning=warning) for column in columns]
    return np.sum(per_feature) - centropydc(xs, y, k, base, warning)
def ctccd(xs, y, k=3, base=2, warning=True):
    """Conditional total correlation, continuous xs / discrete y (mirror of ctcdc)."""
    return ctcdc(y, xs, k=k, base=base, warning=warning)
def corexcd(xs, ys, k=3, base=2, warning=True):
    """CorEx with continuous xs and discrete ys; delegates to corexdc."""
    return corexdc(ys, xs, k=k, base=base, warning=warning)
def corexdc(xs, ys, k=3, base=2, warning=True):
    """CorEx for discrete xs given continuous ys: TC(xs) - CTC(xs|ys)."""
    total_corr = tcd(xs, base)
    cond_total_corr = ctcdc(xs, ys, k, base, warning)
    return total_corr - cond_total_corr
# UTILITY FUNCTIONS
def add_noise(x, intens=1e-10):
    """Add tiny uniform noise to break ties/degeneracy in k-NN distances."""
    jitter = np.random.random_sample(x.shape) * intens
    return x + jitter
def query_neighbors(tree, x, k):
    """Distance from each point in x to its k-th nearest neighbour in tree.

    Queries k+1 neighbours because the closest match is typically the
    point itself (distance 0); column k is then the k-th true neighbour.
    SciPy >= 1.6 renamed cKDTree.query's `n_jobs` keyword to `workers`
    (and later removed `n_jobs`), so try the new name first and fall back
    for older SciPy versions.
    """
    try:
        dists = tree.query(x, k=k + 1, p=float('inf'), workers=-1)[0]
    except TypeError:  # SciPy < 1.6 only knows n_jobs
        dists = tree.query(x, k=k + 1, p=float('inf'), n_jobs=-1)[0]
    return dists[:, k]
def avgdigamma(points, dvec):
    """Mean digamma of neighbour counts within per-point radii dvec.

    Counts the points inside a max-norm ball around every sample and
    returns the average of digamma(count).  The centre point itself is
    included in the count, which implicitly supplies the +1 of Kraskov's
    definition.
    """
    n_points = len(points)
    tree = ss.cKDTree(points)
    # Shrink radii slightly so points sitting exactly on the boundary
    # (the k-th neighbour itself) are excluded.
    radii = dvec - 1e-15
    total = 0.
    for centre, radius in zip(points, radii):
        n_inside = len(tree.query_ball_point(centre, radius, p=float('inf')))
        total += digamma(n_inside) / n_points
    return total
# TESTS
def shuffle_test(measure, x, y, z=False, ns=200, ci=0.95, **kwargs):
    """Permutation test: repeatedly shuffle x and re-estimate measure(x, y[, z]).

    Returns the mean and the (lower, upper) confidence interval at level
    `ci` over `ns` shuffles.  For MI/CMI the shuffled mean should be near
    zero.  Keyword arguments are forwarded to `measure`.
    """
    shuffled = np.copy(x)  # work on a copy so the caller's x is untouched
    estimates = []
    for _ in range(ns):
        np.random.shuffle(shuffled)
        args = (shuffled, y, z) if z else (shuffled, y)
        estimates.append(measure(*args, **kwargs))
    estimates.sort()
    lower = estimates[int((1. - ci) / 2 * ns)]
    upper = estimates[int((1. + ci) / 2 * ns)]
    return np.mean(estimates), (lower, upper)
if __name__ == "__main__":
    # Smoke test: MI between independent samples should be close to zero.
    print("MI between two independent continuous random variables X and Y:")
    print(mi(np.random.rand(1000, 1000), np.random.rand(1000, 300), base=2))
    # Alternative entry points kept for manual experimentation:
    # test_te_net()
    #main()
    #import torch
    #x, y = torch.randn((10, 100)).numpy(), torch.randn((10, 100)).numpy()
#print(symbolic_mutual_information(x, y)) | [
"james.oneill@insight-centre.org"
] | james.oneill@insight-centre.org |
54256491ebbf1cf309cd445c078bf5fcd3c63642 | 32809f6f425bf5665fc19de2bc929bacc3eeb469 | /src/0278-First-Bad-Version/0278.py | ab08d44570fbedd547f7a90eca4cb6bba6e4ad05 | [] | no_license | luliyucoordinate/Leetcode | 9f6bf01f79aa680e2dff11e73e4d10993467f113 | bcc04d49969654cb44f79218a7ef2fd5c1e5449a | refs/heads/master | 2023-05-25T04:58:45.046772 | 2023-05-24T11:57:20 | 2023-05-24T11:57:20 | 132,753,892 | 1,575 | 569 | null | 2023-05-24T11:57:22 | 2018-05-09T12:30:59 | C++ | UTF-8 | Python | false | false | 307 | py | class Solution:
def firstBadVersion(self, n):
"""
:type n: int
:rtype: int
"""
l, r = 0, n
while l < r:
mid = (l + r) >> 1
if isBadVersion(mid):
r = mid
else:
l = mid + 1
return l | [
"luliyucoordinate@outlook.com"
] | luliyucoordinate@outlook.com |
9f0de9c4bfa3b96834d1c14d7260d39cedcaddd5 | 391decb17414b32941bf43380de4d1474334c29c | /.history/Function_Login_20210907201238.py | 18e67d522775604c3f0cbd51e45425cce2f964bc | [] | no_license | leonardin999/Restaurant-Management-Systems-GUI-RMS- | b17cf910ce0955b370ab51d00d161f96a2fb5ccd | 531726a378ced78de079bfffb68a0a304cfbc328 | refs/heads/main | 2023-08-12T09:21:46.196684 | 2021-09-08T14:58:48 | 2021-09-08T14:58:48 | 403,201,841 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,836 | py | ################################################################################
##
## BY: PHUNG HUNG BINH
## This project can be used freely for all uses, as long as they maintain the
## respective credits only in the Python scripts, any information in the visual
## interface (GUI) can be modified without any implication.
##
## There are limitations on Qt licenses if you want to use your products
## commercially, I recommend reading them on the official website:
## https://doc.qt.io/qtforpython/licenses.html
##
from main import *
## ==> GLOBALS
GLOBAL_STATE = 0
GLOBAL_TITLE_BAR = True
## ==> COUT INITIAL MENU
count = 1
class Functions_Login(Login_Windown):
def removeTitleBar(status):
global GLOBAL_TITLE_BAR
GLOBAL_TITLE_BAR = status
def uiDefinitions(self):
## SHOW ==> DROP SHADOW
self.shadow = QGraphicsDropShadowEffect(self)
self.shadow.setBlurRadius(17)
self.shadow.setXOffset(0)
self.shadow.setYOffset(0)
self.shadow.setColor(QColor(0, 0, 0, 150))
self.ui.frame.setGraphicsEffect(self.shadow)
self.shadow1 = QGraphicsDropShadowEffect(self)
self.shadow1.setBlurRadius(17)
self.shadow1.setXOffset(0)
self.shadow1.setYOffset(0)
self.shadow1.setColor(QColor(0, 0, 0, 150))
self.ui.login_area.setGraphicsEffect(self.shadow1)
## SHOW ==> DROP SHADOW
self.shadow = QGraphicsDropShadowEffect(self)
self.shadow.setBlurRadius(17)
self.shadow.setXOffset(0)
self.shadow.setYOffset(0)
self.shadow.setColor(QColor(0, 0, 0, 150))
self.ui.frame_main.setGraphicsEffect(self.shadow)
### ==> MINIMIZE
self.ui.btn_minimize.clicked.connect(lambda: self.showMinimized())
self.ui.btn_close.clicked.connect(lambda: self.close())
| [
"89053434+leonardin999@users.noreply.github.com"
] | 89053434+leonardin999@users.noreply.github.com |
b5b4b09ec0510470ba447b1e2cd4ec172a5f9bf3 | ec551303265c269bf1855fe1a30fdffe9bc894b6 | /old/t20191017_intersection/intersection.py | c3a5a88436acc04574efeb610a63177c156cb504 | [] | no_license | GongFuXiong/leetcode | 27dbda7a5ced630ae2ae65e19d418ebbc65ae167 | f831fd9603592ae5bee3679924f962a3ebce381c | refs/heads/master | 2023-06-25T01:05:45.683510 | 2021-07-26T10:05:25 | 2021-07-26T10:05:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,313 | py | #!/usr/bin/env python
# encoding: utf-8
'''
@author: KM
@license: (C) Copyright 2013-2017, Node Supply Chain Manager Corporation Limited.
@contact: yangkm601@gmail.com
@software: garner
@time: 2019/10/17
@url:https://leetcode-cn.com/problems/intersection-of-two-arrays/
@desc:
349. 两个数组的交集
给定两个数组,编写一个函数来计算它们的交集。
示例 1:
输入: nums1 = [1,2,2,1], nums2 = [2,2]
输出: [2]
示例 2:
输入: nums1 = [4,9,5], nums2 = [9,4,9,8,4]
输出: [9,4]
说明:
输出结果中的每个元素一定是唯一的。
我们可以不考虑输出结果的顺序。
'''
import math
class Solution:
    def intersection(self, nums1, nums2):
        """Return the distinct values common to nums1 and nums2 (any order)."""
        first_values = set(nums1)
        second_values = set(nums2)
        # Iterate the first set so the output order matches the original loop.
        return [value for value in first_values if value in second_values]
if __name__ == "__main__":
solution = Solution()
print("--------1-------")
nums1 = [1,2,2,1]
nums2 = [2,2]
res=solution.intersection(nums1,nums2)
print("res:{0}".format(res))
print("--------2-------")
nums1 = [4,9,5]
nums2 = [9,4,9,8,4]
res=solution.intersection(nums1,nums2)
print("res:{0}".format(res))
| [
"958747457@qq.com"
] | 958747457@qq.com |
93ba99df83854bff81939b53cfb7f0437fc17b99 | baf6e86a56c91e347959ce85c6596a50efc5529c | /srcSklcls/getEvent.py | 5e67ee5dbed33355659a731239813cfbee546ef4 | [] | no_license | qolina/ialp17-fred | 0e43385e739bb9e1dff023be14fd155dc7749581 | 9d8580fae9bdbd477f3ad4094072bb8f4db5df74 | refs/heads/master | 2021-09-08T01:48:50.001632 | 2018-03-05T15:32:57 | 2018-03-05T15:32:57 | 108,222,907 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,690 | py | #! /usr/bin/env python
#coding=utf-8
import time
import re
import os
import sys
import math
import cPickle
from getEventSegPair import *
class Event:
    """A cluster of related event segments.

    nodeHash maps segment id -> number of cluster edges touching it;
    edgeHash maps "id1|id2" -> similarity score of that segment pair.
    """
    def __init__(self, eventId):
        self.eventId = eventId
    def updateEvent(self, nodeHash, edgeHash):
        # Replace the cluster's node and edge tables wholesale.
        self.nodeHash = nodeHash
        self.edgeHash = edgeHash
############################
## load seg pair
def loadsegPair(filepath):
inFile = file(filepath,"r")
segmentHash = cPickle.load(inFile)
segPairHash = cPickle.load(inFile)
inFile.close()
return segmentHash, segPairHash
############################
## load wikiGram
def loadWiki(filepath):
wikiProbHash = {}
inFile = file(filepath,"r")
while True:
lineStr = inFile.readline()
lineStr = re.sub(r'\n', ' ', lineStr)
lineStr = lineStr.strip()
if len(lineStr) <= 0:
break
prob = float(lineStr[0:lineStr.find(" ")])
gram = lineStr[lineStr.find(" ")+1:len(lineStr)]
# print gram + "\t" + str(prob)
wikiProbHash[gram] = prob
inFile.close()
print "### " + str(time.asctime()) + " " + str(len(wikiProbHash)) + " wiki grams' prob are loaded from " + inFile.name
return wikiProbHash
############################
## keep top K (value) items in hash
def getTopItems(sampleHash, K):
    """Trim sampleHash in place to its K highest-valued entries; return it."""
    top_entries = sorted(sampleHash.items(), key=lambda item: item[1], reverse=True)[:K]
    sampleHash.clear()
    for entry_key, entry_value in top_entries:
        sampleHash[entry_key] = entry_value
    return sampleHash
# get segments' k nearest neighbor
def getKNN(segPairHash, kNeib):
kNNHash = {}
for pair in segPairHash:
sim = segPairHash[pair]
segArr = pair.split("|")
segId1 = int(segArr[0])
segId2 = int(segArr[1])
nodeSimHash = {}
if segId1 in kNNHash:
nodeSimHash = kNNHash[segId1]
nodeSimHash[segId2] = sim
if len(nodeSimHash) > kNeib:
nodeSimHash = getTopItems(nodeSimHash, kNeib)
kNNHash[segId1] = nodeSimHash
nodeSimHash2 = {}
if segId2 in kNNHash:
nodeSimHash2 = kNNHash[segId2]
nodeSimHash2[segId1] = sim
if len(nodeSimHash2) > kNeib:
nodeSimHash2 = getTopItems(nodeSimHash2, kNeib)
kNNHash[segId2] = nodeSimHash2
print "### " + str(time.asctime()) + " " + str(len(kNNHash)) + " event segments' " + str(kNeib) + " neighbors are got."
return kNNHash
# cluster similar segments into events
def getClusters(kNNHash, segPairHash):
eventHash = {}
eventIdx = 0
nodeInEventHash = {} # segId:eventId # which node(seg) is already been clustered
for segId1 in kNNHash:
nodeSimHash = kNNHash[segId1]
# print "#############################segId1: " + str(segId1)
# print nodeSimHash
for segId2 in nodeSimHash:
if segId2 in nodeInEventHash:
# s2 existed in one cluster, no clustering again
continue
# print "*************segId2: " + str(segId2)
# print kNNHash[segId2]
#[GUA] should also make sure segId2 in kNNHash[segId1]
if segId1 in kNNHash[segId2]:
# s1 s2 in same cluster
#[GUA] edgeHash mapping: segId + | + segId -> simScore
#[GUA] nodeHash mapping: segId -> edgeNum
#[GUA] nodeInEventHash mapping: segId -> eventId
eventId = eventIdx
nodeHash = {}
edgeHash = {}
event = None
if segId1 in nodeInEventHash:
eventId = nodeInEventHash[segId1]
event = eventHash[eventId]
nodeHash = event.nodeHash
edgeHash = event.edgeHash
nodeHash[segId1] += 1
else:
eventIdx += 1
nodeInEventHash[segId1] = eventId
event = Event(eventId)
nodeHash[segId1] = 1
nodeHash[segId2] = 1
if segId1 < segId2:
edge = str(segId1) + "|" + str(segId2)
else:
edge = str(segId2) + "|" + str(segId1)
edgeHash[edge] = segPairHash[edge]
event.updateEvent(nodeHash, edgeHash)
eventHash[eventId] = event
nodeInEventHash[segId2] = eventId
# seg1's k nearest neighbors have been clustered into other events Or
# seg1's k nearest neighbors all have long distance from seg1
if segId1 not in nodeInEventHash:
eventId = eventIdx
eventIdx += 1
nodeHash = {}
edgeHash = {}
event = Event(eventId)
nodeHash[segId1] = 1
event.updateEvent(nodeHash, edgeHash)
eventHash[eventId] = event
nodeInEventHash[segId1] = eventId
print "### " + str(time.asctime()) + " " + str(len(eventHash)) + " events are got with nodes " + str(len(nodeInEventHash))
return eventHash
def eventScoring(eventHash, reverseSegHash, dataFilePath):
eventSegFilePath = dataFilePath + "event" + UNIT + Day
[unitHash, unitDFHash, unitInvolvedHash, unitScoreHash] = loadEvtseg(eventSegFilePath)
score_max = 0.0
score_eventHash = {}
newWorthScore_nodeHash = {}
for eventId in sorted(eventHash.keys()):
event = eventHash[eventId]
nodeList = event.nodeHash.keys()
edgeHash = event.edgeHash
nodeNum = len(nodeList)
# part1
nodeZScoreArr = [float(unitScoreHash[segId][:unitScoreHash[segId].find("-")]) for segId in nodeList]
zscore_nodes = sum(nodeZScoreArr)
# part2
nodeStrList = [reverseSegHash[nodeid] for nodeid in nodeList]
node_NewWorthScoreArr = [frmNewWorth(nodeStr) for nodeStr in nodeStrList]
newWorthScore_nodes = sum(node_NewWorthScoreArr)
newWorthScore_nodeHash.update(dict([(nodeStrList[i], node_NewWorthScoreArr[i]) for i in range(nodeNum)]))
simScore_edge = sum(edgeHash.values())
scoreParts_eventArr = [newWorthScore_nodes, simScore_edge, zscore_nodes]
score_event = (newWorthScore_nodes/nodeNum) * (simScore_edge/nodeNum)
if score_event <= 0:
print "##0-score event", eventId, nodeStrList, scoreParts_eventArr
continue
score_eventHash[eventId] = score_event
if score_event > score_max:
score_max = score_event
score_eventHash = dict([(eventId, score_max/score_eventHash[eventId]) for eventId in score_eventHash])
score_nodeHash = newWorthScore_nodeHash
print "###Score of events and nodes are obtained. ", len(score_eventHash), len(score_nodeHash), score_max
return score_eventHash, score_nodeHash
# filtering Or scoreing
def eventScoring_mu(eventHash, reverseSegHash):
segmentNewWorthHash = {}
mu_max = 0.0
mu_eventHash = {}
for eventId in sorted(eventHash.keys()):
event = eventHash[eventId]
nodeList = event.nodeHash.keys()
edgeHash = event.edgeHash
segNum = len(nodeList)
mu_sum = 0.0
sim_sum = 0.0
contentArr = [reverseSegHash[id] for id in nodeList]
currNewWorthHash = {}
for segment in contentArr:
mu_s = frmNewWorth(segment)# for frame structure
#mu_s = segNewWorth(segment) # for segment
segmentNewWorthHash[segment] = mu_s
currNewWorthHash[segment] = mu_s
mu_sum += mu_s
sim_sum = sum(edgeHash.values())
mu_avg = mu_sum/segNum
sim_avg = sim_sum/segNum
mu_e = mu_avg * sim_avg
if mu_e > 0:
mu_eventHash[eventId] = mu_e
if mu_e > mu_max:
mu_max = mu_e
print "### Aft filtering 0 mu_e " + str(len(mu_eventHash)) + " events are kept. mu_max: " + str(mu_max)
score_eventHash = dict([(eventId, mu_max/mu_eventHash[eventId]) for eventId in mu_eventHash])
return score_eventHash, segmentNewWorthHash
############################
## newsWorthiness
def frmNewWorth(frm):
    """Newsworthiness of a frame: score each '|'-joined segment with
    segNewWorth() and return the sum (averaging variant left commented)."""
    frm = frm.strip("|")
    segArr = frm.split("|")
    worthArr = [segNewWorth(seg) for seg in segArr]
    #return sum(worthArr)/len(worthArr)
    return sum(worthArr)
def segNewWorth(segment):
    """Newsworthiness of one '_'-joined segment.

    Looks up phrases in the module-global wikiProbHash (log anchor
    probabilities loaded by loadWiki).  A single word scores
    exp(logprob), or 0.0 if unseen; a multi-word segment scores the best
    exp(logprob) - 1.0 over all contiguous sub-phrases.
    """
    wordArr = segment.split("_")
    wordNum = len(wordArr)
    if wordNum == 1:
        if segment in wikiProbHash:
            return math.exp(wikiProbHash[segment])
        else:
            return 0.0
    maxProb = 0.0
    # Scan every contiguous sub-span wordArr[i:j] and keep the best score.
    for i in range(0, wordNum):
        for j in range(i+1, wordNum+1):
            subArr = wordArr[i:j]
            prob = 0.0
            subS = " ".join(subArr)
            if subS in wikiProbHash:
                prob = math.exp(wikiProbHash[subS]) - 1.0
            if prob > maxProb:
                maxProb = prob
    # if maxProb > 0:
    #     print "Newsworthiness of " + segment + " : " + str(maxProb)
    return maxProb
def writeEvent2File(eventHash, score_eventHash, score_nodeHash, reverseSegHash, tStr, kNeib, taoRatio):
if len(sys.argv) == 2:
eventFile = file(dataFilePath + "EventFile" + tStr + "_k" + str(kNeib) + "t" + str(taoRatio), "w")
else:
eventFile = file(eventFileName + "_k" + str(kNeib) + "t" + str(taoRatio), "w")
sortedEventlist = sorted(score_eventHash.items(), key = lambda a:a[1])
eventNum = 0
# for statistic
nodeLenHash = {}
eventNumHash = {}
for eventItem in sortedEventlist:
eventNum += 1
eventId = eventItem[0]
event = eventHash[eventId]
edgeHash = event.edgeHash
nodeHash = event.nodeHash
nodeList = event.nodeHash.keys()
rankedNodeList_byId = sorted(nodeHash.items(), key = lambda a:a[1], reverse = True)
nodeList_byId = [item[0] for item in rankedNodeList_byId]
segList = [reverseSegHash[id] for id in nodeList_byId]
nodeNewWorthHash = dict([(segId, score_nodeHash[reverseSegHash[segId]]) for segId in nodeList])
rankedNodeList_byNewWorth = sorted(nodeNewWorthHash.items(), key = lambda a:a[1], reverse = True)
segList_byNewWorth = [reverseSegHash[item[0]] for item in rankedNodeList_byNewWorth]
# for statistic
nodes = len(nodeList)
if nodes in nodeLenHash:
nodeLenHash[nodes] += 1
else:
nodeLenHash[nodes] = 1
ratioInt = int(eventItem[1])
if ratioInt <= 10:
if ratioInt in eventNumHash:
eventNumHash[ratioInt] += 1
else:
eventNumHash[ratioInt] = 1
eventFile.write("****************************************\n###Event " + str(eventNum) + " ratio: " + str(eventItem[1]))
eventFile.write(" " + str(len(nodeList)) + " nodes and " + str(len(edgeHash)) + " edges.\n")
eventFile.write(str(nodeList_byId) + "\n")
eventFile.write(" ".join(segList) + "\n")
eventFile.write(" ".join(segList_byNewWorth) + "\n")
eventFile.write(str(edgeHash) + "\n")
eventFile.close()
############################
## cluster Event Segment
def clusterEventSegment(dataFilePath, kNeib, taoRatio):
fileList = os.listdir(dataFilePath)
for item in sorted(fileList):
if item.find("relSkl_") != 0:
continue
tStr = item[-2:]
if tStr != Day:
continue
print "Time window: " + tStr
if len(sys.argv) == 2:
segPairFilePath = dataFilePath + "segPairFile" + tStr
else:
segPairFilePath = segPairFileName
[segmentHash, segPairHash] = loadsegPair(segPairFilePath)
print "### " + str(time.asctime()) + " " + str(len(segmentHash)) + " event segments in " + segPairFilePath + " are loaded. With segment pairs Num: " + str(len(segPairHash))
kNNHash = getKNN(segPairHash, kNeib)
eventHash = getClusters(kNNHash, segPairHash)
reverseSegHash = dict([(segmentHash[seg], seg) for seg in segmentHash])
[score_eventHash, score_nodeHash] = eventScoring(eventHash, reverseSegHash, dataFilePath)
writeEvent2File(eventHash, score_eventHash, score_nodeHash, reverseSegHash, tStr, kNeib, taoRatio)
global UNIT
UNIT = "skl"
############################
## main Function
if __name__=="__main__":
print "###program starts at " + str(time.asctime())
global Day, segPairFileName, eventFileName
if len(sys.argv) > 2:
Day = sys.argv[1]
segPairFileName = sys.argv[2]
eventFileName = sys.argv[3]
elif len(sys.argv) == 2:
Day = sys.argv[1]
else:
print "Usage getEvent.py day [segPairFileName] [eventFileName]"
sys.exit()
kNeib = 5
taoRatio = 2
dataFilePath = r"../ni_data/"
wikiPath = "../data/anchorProbFile_all"
if True:
global wikiProbHash
wikiProbHash = loadWiki(wikiPath)
clusterEventSegment(dataFilePath, kNeib, taoRatio)
# exp: for choosing suitable parameters
#for kNeib in range(4,7):
# clusterEventSegment(dataFilePath, kNeib, taoRatio)
#for taoRatio in range(3,6):
# clusterEventSegment(dataFilePath, kNeib, taoRatio)
print "###program ends at " + str(time.asctime())
| [
"qolina@gmail.com"
] | qolina@gmail.com |
f0b58de91c55f71530c50df047a31bbb1fe13f48 | 7de954bcc14cce38758463f0b160c3c1c0f7df3f | /cmsplugin_cascade/cms_plugins.py | 5910ca915a921223057b73e18cdafecb596be46f | [
"MIT"
] | permissive | pmutale/djangocms-cascade | c66210a0afad0d2783c2904732972fb8890d7614 | 066b8a1ca97d3afd8b79968a7f5af506a265095c | refs/heads/master | 2022-10-28T12:34:18.610777 | 2015-08-21T20:58:03 | 2015-08-21T20:58:03 | 41,476,721 | 0 | 0 | MIT | 2022-10-23T19:43:13 | 2015-08-27T09:05:23 | Python | UTF-8 | Python | false | false | 1,098 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
from .settings import CASCADE_PLUGINS
for module in CASCADE_PLUGINS:
try:
# if a module was specified, load all plugins in module settings
module_settings = import_module('{}.settings'.format(module))
module_plugins = getattr(module_settings, 'CASCADE_PLUGINS', [])
for p in module_plugins:
try:
import_module('{}.{}'.format(module, p))
except ImportError as err:
msg = "Plugin {} as specified in {}.settings.CMSPLUGIN_CASCADE_PLUGINS could not be loaded: {}"
raise ImproperlyConfigured(msg.format(p, module, err.message))
except ImportError:
try:
# otherwise try with cms_plugins in the named module
import_module('{}.cms_plugins'.format(module))
except ImportError:
# otherwise just use the named module as plugin
import_module('{}'.format(module))
| [
"jacob.rief@gmail.com"
] | jacob.rief@gmail.com |
a36a97b7755caecb44009eb13acd970f356a1e1d | b5ef3b9da130f604f111bd469128b73e78d6ba9d | /bt5/erp5_crm/SkinTemplateItem/portal_skins/erp5_crm/Event_setTextContentFromNotificationMessage.py | f1a3d8ee8cc3c8743764af664b9b86970b517a98 | [] | no_license | soediro/erp5 | 154bb2057c4cd12c14018c1ab2a09a78b2d2386a | 3d1a8811007a363b7a43df4b295b5e0965c2d125 | refs/heads/master | 2021-01-11T00:31:05.445267 | 2016-10-05T09:28:05 | 2016-10-07T02:59:00 | 70,526,968 | 1 | 0 | null | 2016-10-10T20:40:41 | 2016-10-10T20:40:40 | null | UTF-8 | Python | false | false | 1,330 | py | portal = context.getPortalObject()
if not language:
language = context.getLanguage()
if not language:
language = portal.portal_preferences.getPreferredCustomerRelationLanguage()
notification_message = portal.portal_notifications.getDocumentValue(
language=language,
reference=reference)
if substitution_method_parameter_dict is None:
substitution_method_parameter_dict = {}
# Notification method will receive the current event under "event_value" key.
# This way notification method can return properties from recipient or follow up of the event.
substitution_method_parameter_dict.setdefault('event_value', context)
if notification_message is not None:
context.setContentType(notification_message.getContentType())
target_format = "txt"
if context.getContentType() == 'text/html':
target_format = "html"
mime, text_content = notification_message.convert(target_format,
substitution_method_parameter_dict=substitution_method_parameter_dict)
context.setTextContent(text_content)
context.setAggregateList(notification_message.getProperty('aggregate_list', []))
if not context.hasTitle():
context.setTitle(notification_message.asSubjectText(
substitution_method_parameter_dict=substitution_method_parameter_dict))
| [
"georgios.dagkakis@nexedi.com"
] | georgios.dagkakis@nexedi.com |
a0dd12ad29f566a0c62075e3ac57d306a8d68e30 | b5811a11a7d22414a5690a681cdbb6ab95e08e06 | /backend/employee/admin.py | bed287a77df0de1ac2ec6fccebbe4036c36d5f07 | [] | no_license | kausko/PULSE-X | fa2d2fe913c0d091d807b245d4946922536c699c | 88a89d3fcd20289679e3b68aa36561b24ae9ea4e | refs/heads/master | 2023-05-09T12:47:44.860620 | 2021-04-21T14:07:43 | 2021-04-21T14:07:43 | 303,328,807 | 0 | 0 | null | 2021-06-05T00:10:07 | 2020-10-12T08:37:08 | Jupyter Notebook | UTF-8 | Python | false | false | 272 | py | from django.contrib import admin
from .models import Review
@admin.register(Review)
class ReviewAdmin(admin.ModelAdmin):
list_display = ['user', 'sentiment', 'flag', 'visited', 'sarcasm', 'helpfulness', 'is_twitter', ]
list_filter = ['visited', 'is_twitter', ]
| [
"tanmaypardeshi@gmail.com"
] | tanmaypardeshi@gmail.com |
63c96727ba5c7934d8f0c298575bb0199dc6bd74 | 5f10ca2439551040b0af336fd7e07dcc935fc77d | /Binary tree/二叉树性质相关题目/110. Balanced Binary Tree.py | b38c2eadc37c9e14c688c3837e5efefee9db1c50 | [] | no_license | catdog001/leetcode2.0 | 2715797a303907188943bf735320e976d574f11f | d7c96cd9a1baa543f9dab28750be96c3ac4dc731 | refs/heads/master | 2021-06-02T10:33:41.552786 | 2020-04-08T04:18:04 | 2020-04-08T04:18:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,477 | py | # -*- coding: utf-8 -*-
# @Time : 2/11/2020 8:28 PM
# @Author : LI Dongdong
# @FileName: 110. Balanced Binary Tree.py
''''''
'''
题目分析
1.要求:Given a binary tree, determine if it is height-balanced.
For this problem, a height-balanced binary tree is defined as:
a binary tree in which the left and right subtrees of every node differ in height by no more than 1.
Example 1:
Given the following tree [3,9,20,null,null,15,7]:
3
/ \
9 20
/ \
15 7
Return true.
Example 2:
Given the following tree [1,2,2,3,3,null,null,4,4]:
1
/ \
2 2
/ \
3 3
/ \
4 4
Return false.
2.理解:left and right node's subtree height difference is no more than 1
3.类型:character of tree
4.确认输入输出及边界条件:
input: root with definition, no range, repeated? Y order? N
output: True/False
corner case: None -> True Only one-> True
4.方法及方法分析:top-down-dfs bottom-up-dfs
time complexity order: top-down-dfs O(N) < brute force-dfs O(NlogN)
space complexity order: top-down-dfs O(N) = brute force-dfs O(N)
'''
from collections import deque
def constructTree(nodeList): # input: list using bfs, output: root
    """Build a binary tree from a level-order (BFS) value list.

    None entries become missing children.  NOTE(review): None children are
    still enqueued, so a list describing deeper levels after a None parent
    would make `head.left` fail on a None head — the inputs used in this
    file never hit that case, but confirm before reuse.
    """
    new_node = []
    for elem in nodeList: # transfer list val to tree node
        if elem:
            new_node.append(TreeNode(elem))
        else:
            new_node.append(None)
    queue = deque()
    queue.append(new_node[0])
    resHead = queue[0]  # keep a handle on the root before popping
    i = 1
    while i <= len(new_node) - 1: # bfs method building
        head = queue.popleft()
        head.left = new_node[i] # build left and push
        queue.append(head.left)
        if i + 1 == len(new_node): # if no i + 1 in new_node
            break
        head.right = new_node[i + 1] # build right and push
        queue.append(head.right)
        i = i + 2
    return resHead
'''
A.
思路:top-down-dfs
方法:
比较每个节点的子树的最大高度
main function: scan every node, while compare max height of every node's subtree by DFS or BFS
helper function: calculate the max height of a root by DFS or BFS
time complex:
skewed tree: O(N*N),but after check the height of the first 2 subtrees, function stop,
so it is actually O(N*2) = O(N)
average: for height function, O(logN). So it was O(NlogN) for N nodes.
space complex: O(N) The recursion stack may contain all nodes if the tree is skewed.
易错点:测量高度的函数
'''
class TreeNode:
    """Binary-tree node: a value plus optional left/right children."""

    def __init__(self, x):
        # x: payload value stored at this node
        self.val = x
        self.left = None   # left child (TreeNode or None)
        self.right = None  # right child (TreeNode or None)
class Solution:
    """Top-down check: a tree is height-balanced iff, at every node, the
    two subtree heights differ by at most one and both subtrees are
    themselves balanced.  O(N log N) on average (height is recomputed)."""

    def isBalanced(self, root: TreeNode) -> bool:
        if root is None:  # empty tree is balanced by definition
            return True
        gap = abs(self.depth(root.left) - self.depth(root.right))
        if gap > 1:  # this node already breaks the height rule
            return False
        # recurse: every descendant must satisfy the rule as well
        return self.isBalanced(root.left) and self.isBalanced(root.right)

    def depth(self, root):
        """Return the height of the tree rooted at ``root`` (0 for None)."""
        if root is None:
            return 0
        return 1 + max(self.depth(root.left), self.depth(root.right))
# quick smoke test with the example tree [3,9,20,None,None,15,7]
root = constructTree([3,9,20,None, None, 15,7])
X = Solution()
print(X.isBalanced(root))  # expected output: True
'''
自己的写法
'''
class Solution:
    """Same top-down strategy, written with an accumulator-style depth
    helper (``numb`` carries the running depth down the recursion)."""

    def isBalanced(self, root: TreeNode) -> bool:
        if not root:
            return True
        left_h = self.depth(root.left, 0)
        right_h = self.depth(root.right, 0)
        if abs(left_h - right_h) > 1:
            return False
        return self.isBalanced(root.left) and self.isBalanced(root.right)

    def depth(self, root, numb):  # input: root + running depth, output: total depth
        """Return ``numb`` plus the height of the subtree at ``root``."""
        if not root:
            return numb
        # A missing child contributes exactly numb + 1 via the None base
        # case, so the max() below also covers leaf and one-child nodes.
        return max(self.depth(root.left, numb + 1),
                   self.depth(root.right, numb + 1))
'''
test code
input None - True, only one - True
input
3
/ \
9 20
/ \
15 7
root 3 9 20 15 7
root.left 9 None 15 None None
root.right 20 NOne 7 None None
abs(L-R) 1 0 9 0 0
'''
'''
B.
要返回是否平衡,就要需要目前最大深度这个中间变量,故dfs返回两个值,一个是是否平衡,一个是高度
基于求最大深度的模板修改,dfs可以返回多个性质,bottom up的思路
dfs返回是否是balanced,和height
'''
class Solution:
    """Bottom-up check: a single DFS returns (balanced?, height) per
    subtree, so each node is visited once — O(N) time, O(N) stack."""

    def isBalanced(self, root: TreeNode) -> bool:
        if not root:  # empty tree
            return True

        def dfs(node):
            # returns (is_balanced, height) for the subtree at ``node``
            if node is None:
                return True, 0
            ok_left, h_left = dfs(node.left)
            ok_right, h_right = dfs(node.right)
            balanced = ok_left and ok_right and abs(h_left - h_right) <= 1
            return balanced, max(h_left, h_right) + 1

        return dfs(root)[0]
'''
思路:bottom-up- 栈模拟递归
'''
class Solution:
    def isBalanced(self, root: TreeNode) -> bool:
        """Iterative bottom-up check using an explicit stack (post-order).

        ``depth`` maps each finished node to its subtree height, with -1
        meaning "unbalanced somewhere below"; the ``{None: 0}`` seed lets
        missing children be looked up uniformly.
        """
        depth, stack = {None: 0}, [(root, False)]
        while stack:
            node, visited = stack.pop()
            if not node:
                continue
            if not visited:
                # First visit: re-push this node flagged as visited, then
                # its children on top, so both children are fully processed
                # before we come back here (post-order emulation).
                stack.append((node, True))
                stack.append((node.right, False))
                stack.append((node.left, False))
            else:
                # Second visit: both children's depths are already recorded.
                left, right = depth[node.left], depth[node.right]
                if left == -1 or right == -1 or abs(left-right) > 1:
                    depth[node] = -1  # propagate the "unbalanced" marker up
                else:
                    depth[node] = max(left, right) + 1
        return depth[root] != -1  # root depth of -1 means some node failed
'''
test code
input None - True, only one - True
input
3
/ \
9 20
/ \
15 7
root 3 9 20 15 7
root.left 9 15
root.right
depth left 0 0 0
depth right 0 0 0
abs 1 0 0 0
return 3 1 2 1 1
'''
| [
"lidongdongbuaa@gmail.com"
] | lidongdongbuaa@gmail.com |
1289da36f9af10997b469d227d42e7e76c5f609a | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/insert_20200610210502.py | c8dce3ee0bda5a8dea0ebbfa3dee6e32c5927854 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 582 | py | # nums is a list
# find where n is to be inserted
# soo,you loop through the array
# the array is sorted
# to know the position you should check whethere n is greater than nums[i]
# continue the loop as you check
def Insert(nums, n):
    """Return the index at which ``n`` should be inserted into the sorted
    list ``nums`` so the list stays sorted.

    If ``n`` already occurs, the index of its first occurrence is
    returned (the same contract as ``bisect.bisect_left``).

    The previous version had a syntax error (``return i+``) and printed
    inconsistent indices; this rewrite scans once and returns the index.
    """
    i = 0
    # advance past every element strictly smaller than n
    while i < len(nums) and nums[i] < n:
        i += 1
    print(i)  # keep the original script's diagnostic output
    return i


Insert([1,3,4,6],5)
| [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
00b249fc818e6a25ba0fe40b06a35ccf0a5fb550 | 237162607427106ae9564670d47427a62356861f | /core/migrations/0141_planitem_realize_every_time.py | 46c28c415ce382c8aa4bb459be31cda393ee0fd7 | [] | no_license | pitipund/basecore | 8648c1f4fa37b6e6075fd710ca422fe159ba930e | a0c20cec1e17dd0eb6abcaaa7d2623e38b60318b | refs/heads/master | 2020-09-13T20:16:02.622903 | 2019-11-20T09:07:15 | 2019-11-20T09:07:15 | 221,885,342 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 456 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-06-07 18:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add the ``realize_every_time`` boolean
    flag (default ``False``) to the ``planitem`` model."""

    # must run after the DoctorGroup migration in the same app
    dependencies = [
        ('core', '0140_doctorgroup'),
    ]

    operations = [
        migrations.AddField(
            model_name='planitem',
            name='realize_every_time',
            field=models.BooleanField(default=False),
        ),
    ]
| [
"longman_694@hotmail.com"
] | longman_694@hotmail.com |
3c1ab0bac6360d881bc4117a080e38bb0d5ced9e | 19d1a808c9bb3dfcbd4a5b852962e6f19d18f112 | /python/multiprocessing_lock.py | eeaf149caf3dc777cc922aeada80352bfebff0f6 | [] | no_license | dataAlgorithms/data | 7e3aab011a9a2442c6d3d54d8d4bfd4d1ce0a6d3 | 49c95a0e0d0c23d63be2ef095afff76e55d80f5d | refs/heads/master | 2020-04-15T12:45:34.734363 | 2018-04-21T10:23:48 | 2018-04-21T10:23:48 | 61,755,627 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 613 | py | from multiprocessing import Process, Lock
def worker_with(lock, f):
    """Append a marker line to file ``f`` while holding ``lock``,
    acquired via the ``with`` statement.

    The file is now opened with a context manager so the handle is
    closed even if the write raises (the old open/close pair leaked
    the handle on error).
    """
    with lock:
        with open(f, "a+") as fs:
            fs.write("Lock acquired via with\n")
def worker_no_with(lock, f):
    """Append a marker line to file ``f`` using explicit
    ``acquire()``/``release()`` (the point of this demo function).

    The lock handling is kept manual on purpose; only the file access
    was hardened: ``with open`` guarantees the handle is closed even if
    the write raises (the old open/close pair leaked it on error).
    """
    lock.acquire()
    try:
        with open(f, "a+") as fs:
            fs.write("Lock acquired directly\n")
    finally:
        lock.release()  # always release, even if the write raised
if __name__ == "__main__":
f = "file.txt"
lock = Lock()
w = Process(target=worker_with, args=(lock, f))
nw = Process(target=worker_no_with, args=(lock, f))
w.start()
nw.start()
w.join()
nw.join() | [
"noreply@github.com"
] | dataAlgorithms.noreply@github.com |
fbdf99d5569a466f8d2cc4657e6077b12baf4099 | 97884252481ff208519194ecd63dc3a79c250220 | /pyobs/events/roofopened.py | 35b412ca265e434dcd39f53c97dcd70ec21adcbf | [
"MIT"
] | permissive | pyobs/pyobs-core | a1f30137d7f991bad4e115de38f543e59a6e30d2 | 2d7a06e5485b61b6ca7e51d99b08651ea6021086 | refs/heads/master | 2023-09-01T20:49:07.610730 | 2023-08-29T09:20:05 | 2023-08-29T09:20:05 | 174,351,157 | 9 | 3 | NOASSERTION | 2023-09-14T20:39:48 | 2019-03-07T13:41:27 | Python | UTF-8 | Python | false | false | 185 | py | from .event import Event
class RoofOpenedEvent(Event):
    """Event to be sent when the roof has finished opening.

    Adds no payload of its own; all behaviour comes from ``Event``.
    """

    # NOTE(review): pins the reported module to the public package path —
    # presumably so serialised/advertised event names stay stable; confirm.
    __module__ = "pyobs.events"


__all__ = ["RoofOpenedEvent"]
| [
"thusser@uni-goettingen.de"
] | thusser@uni-goettingen.de |
f46b2f2104443a678e0c17cc4637eb0196ada70d | 24cce1ec7737f9ebb6df3e317a36c0a0329ec664 | /HZMX/amazon_api/test/cs.py | f11fa5cfe2aaec1f3ac126b2c6af0d554b90c6c6 | [] | no_license | tate11/HangZhouMinXing | ab261cb347f317f9bc4a77a145797745e2531029 | 14b7d34af635db015bd3f2c139be1ae6562792f9 | refs/heads/master | 2021-04-12T04:23:20.165503 | 2018-03-14T05:02:05 | 2018-03-14T05:02:05 | 125,855,729 | 1 | 0 | null | 2018-03-19T12:42:07 | 2018-03-19T12:42:07 | null | UTF-8 | Python | false | false | 135 | py | # -*- coding:utf-8 -*-
import re
from lxml import etree
import requests
import copy
print u'共%d个产品,已创建%d个' % (1, 2) | [
"1121403085"
] | 1121403085 |
62febde352acd9a829c1333257e2334a366cc431 | bae75bf1de75fb1b76e19b0d32c778e566de570a | /smodels/docs/manual/source/recipes/inputFiles/scanExample/smodels-output/100338791.slha.py | a1a1d99d5e480d827a67ed6cf36f55d17aaef125 | [] | no_license | andlessa/RDM | 78ae5cbadda1875c24e1bb726096b05c61627249 | ac6b242871894fee492e089d378806c2c2e7aad8 | refs/heads/master | 2023-08-16T00:47:14.415434 | 2021-09-21T20:54:25 | 2021-09-21T20:54:25 | 228,639,778 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,221 | py | smodelsOutput = {'OutputStatus': {'sigmacut': 0.01, 'minmassgap': 5.0, 'maxcond': 0.2, 'ncpus': 1, 'file status': 1, 'decomposition status': 1, 'warnings': 'Input file ok', 'input file': 'inputFiles/scanExample/slha/100338791.slha', 'database version': '1.2.0', 'smodels version': '1.2.0rc'}, 'ExptRes': [{'maxcond': 0.01709543538595535, 'theory prediction (fb)': 0.004263624069134947, 'upper limit (fb)': 0.268, 'expected upper limit (fb)': 0.268, 'TxNames': ['TSlepSlep'], 'Mass (GeV)': [[513.7, 336.0], [513.7, 336.0]], 'AnalysisID': 'ATLAS-SUSY-2013-11', 'DataSetID': 'mT2-150-SF', 'AnalysisSqrts (TeV)': 8.0, 'lumi (fb-1)': 20.3, 'dataType': 'efficiencyMap', 'r': 0.015909045034085623, 'r_expected': 0.015909045034085623, 'chi2': 0.01981757950430829, 'likelihood': 0.1262204906547757}], 'Total xsec considered (fb)': 85188.01117466655, 'Missed Topologies': [{'sqrts (TeV)': 13.0, 'weight (fb)': 845.86936895942, 'element': "[[[jet,jet]],[[jet],[jet,jet]]] ('MET', 'MET')"}, {'sqrts (TeV)': 13.0, 'weight (fb)': 46.844075054667165, 'element': "[[[jet]],[[jet],[jet,jet]]] ('MET', 'MET')"}, {'sqrts (TeV)': 13.0, 'weight (fb)': 46.62875381033042, 'element': "[[[jet,jet]],[[jet],[jet]]] ('MET', 'MET')"}, {'sqrts (TeV)': 13.0, 'weight (fb)': 24.825679454154606, 'element': "[[[jet,jet]],[[jet],[nu],[ta]]] ('MET', 'MET')"}, {'sqrts (TeV)': 13.0, 'weight (fb)': 23.83990502372118, 'element': "[[[jet,jet]],[[jet],[ta]]] ('MET', 'MET')"}, {'sqrts (TeV)': 13.0, 
'weight (fb)': 12.404236616382951, 'element': "[[[jet,jet]],[[jet],[ta],[ta]]] ('MET', 'MET')"}, {'sqrts (TeV)': 13.0, 'weight (fb)': 11.910812732326013, 'element': "[[[jet,jet]],[[nu],[ta]]] ('MET', 'MET')"}, {'sqrts (TeV)': 13.0, 'weight (fb)': 11.437859931219778, 'element': "[[[ta]],[[jet,jet]]] ('MET', 'MET')"}, {'sqrts (TeV)': 13.0, 'weight (fb)': 6.755826556536986, 'element': "[[[jet],[jet,jet]],[[jet],[jet,jet]]] ('MET', 'MET')"}, {'sqrts (TeV)': 13.0, 'weight (fb)': 6.520134295462761, 'element': "[[],[[jet,jet]]] ('MET', 'MET')"}], 'Long Cascades': [{'sqrts (TeV)': 13.0, 'weight (fb)': 25.970849347641987, 'mother PIDs': [[1000002, 1000021], [1000004, 1000021]]}, {'sqrts (TeV)': 13.0, 'weight (fb)': 13.25792825463179, 'mother PIDs': [[1000001, 1000021], [1000003, 1000021]]}, {'sqrts (TeV)': 13.0, 'weight (fb)': 0.1651851280824815, 'mother PIDs': [[1000001, 2000001]]}, {'sqrts (TeV)': 13.0, 'weight (fb)': 0.1615845271253974, 'mother PIDs': [[1000002, 2000002]]}, {'sqrts (TeV)': 13.0, 'weight (fb)': 0.0986933032919052, 'mother PIDs': [[1000001, 1000002]]}, {'sqrts (TeV)': 13.0, 'weight (fb)': 0.04611568688152721, 'mother PIDs': [[1000002, 2000001]]}], 'Asymmetric Branches': [{'sqrts (TeV)': 13.0, 'weight (fb)': 360.16498517455005, 'mother PIDs': [[1000002, 1000021], [1000004, 1000021]]}, {'sqrts (TeV)': 13.0, 'weight (fb)': 304.5923886521284, 'mother PIDs': [[1000021, 2000002]]}, {'sqrts (TeV)': 13.0, 'weight (fb)': 178.14003310055807, 'mother PIDs': [[1000001, 1000021], [1000003, 1000021]]}, {'sqrts (TeV)': 13.0, 'weight (fb)': 118.8973713804067, 'mother PIDs': [[1000021, 2000001], [1000021, 2000003]]}, {'sqrts (TeV)': 13.0, 'weight (fb)': 24.636294792140646, 'mother PIDs': [[1000021, 1000024]]}, {'sqrts (TeV)': 13.0, 'weight (fb)': 5.027322736976338, 'mother PIDs': [[1000021, 2000004]]}, {'sqrts (TeV)': 13.0, 'weight (fb)': 4.619342989832711, 'mother PIDs': [[1000021, 1000022]]}, {'sqrts (TeV)': 13.0, 'weight (fb)': 4.619288610927034, 'mother PIDs': 
[[1000021, 1000023]]}, {'sqrts (TeV)': 13.0, 'weight (fb)': 2.696993182745791, 'mother PIDs': [[1000002, 2000002]]}, {'sqrts (TeV)': 13.0, 'weight (fb)': 2.6688423016631457, 'mother PIDs': [[1000001, 2000001]]}], 'Outside Grid': [{'sqrts (TeV)': 13.0, 'weight (fb)': 75542.54771768965, 'element': "[[[jet,jet]],[[jet,jet]]] ('MET', 'MET')"}, {'sqrts (TeV)': 13.0, 'weight (fb)': 8360.179581065855, 'element': "[[[jet]],[[jet,jet]]] ('MET', 'MET')"}, {'sqrts (TeV)': 13.0, 'weight (fb)': 231.2181967643345, 'element': "[[[jet]],[[jet]]] ('MET', 'MET')"}, {'sqrts (TeV)': 13.0, 'weight (fb)': 4.436027950247279, 'element': "[[[b,b]],[[jet,jet]]] ('MET', 'MET')"}, {'sqrts (TeV)': 13.0, 'weight (fb)': 0.24463607992250824, 'element': "[[[jet]],[[b,b]]] ('MET', 'MET')"}]} | [
"lessa.a.p@gmail.com"
] | lessa.a.p@gmail.com |
b78fb673d40631f1eb4b0d2635d17b5e2ad390eb | 05032af4b4c522d4c3ee2d70e61ee1f30fa6abf3 | /12_Accepting_user_inputs_GUI.py | 288d5c19d7bd0aa4d05cdaf185b16feaa5489bf8 | [] | no_license | tayyabmalik4/python_GUI | c2db4bd6b4f2a153e5bced69073b17240126e7d0 | 608a7e43e17a27b90239a2ebae3338ce52d7b20d | refs/heads/main | 2023-07-28T01:31:41.840077 | 2021-09-11T17:45:23 | 2021-09-11T17:45:23 | 404,079,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,022 | py | # (12)*************************Accepting User Inputs in new text file in tkinter form**************************
from tkinter import *

root = Tk()                    # main application window
root.geometry("644x344")       # initial size: 644 px wide by 344 px tall
def getvals():
    """Read the current form values, echo them, and append one record
    (formatted as a tuple) to the records text file."""
    record = (namevalue.get(), phonevalue.get(), gendervalue.get(),
              contactvalue.get(), paymentvalue.get(), foodservicevalue.get())
    print(f"{record}")
    with open('12_Accepting_user_inputs_records.txt', 'a') as f:
        f.write(f"{record}\n")
# ----- Labels: column 2 holds the field captions -----
Label(root, text="Welcome to Tayyab Travels",font='comixsansms 13 bold',pady=15).grid(row=0,column=3)
name = Label(root, text='Name')
phone = Label(root, text= "phone")
gender = Label(root, text= "Gender")
contact = Label(root, text="Emergency Contect")  # NOTE(review): "Contect" typo is in the UI string itself
payment = Label(root, text="Payment Mode")
name.grid(row=1, column=2)
phone.grid(row=2, column=2)
gender.grid(row=3, column=2)
contact.grid(row=4, column=2)
payment.grid(row=5, column=2)
# ----- Tk variables backing the entry widgets (read later by getvals) -----
namevalue = StringVar()
phonevalue = StringVar()
gendervalue = StringVar()
contactvalue = StringVar()
paymentvalue = StringVar()
foodservicevalue = IntVar()  # checkbox state: 1 = checked, 0 = unchecked
# ----- Entry widgets, each bound to its variable -----
nameentry =Entry(root,textvariable=namevalue)
phoneentry = Entry(root, textvariable=phonevalue)
genderentry = Entry(root, textvariable=gendervalue)
contactentry = Entry(root, textvariable=contactvalue)
paymententry = Entry(root, textvariable=paymentvalue)
# ----- Place the entries next to their labels (column 3) -----
nameentry.grid(row=1,column=3)
phoneentry.grid(row=2,column=3)
genderentry.grid(row=3,column=3)
contactentry.grid(row=4,column=3)
paymententry.grid(row=5,column=3)
# ----- Meal pre-booking checkbox -----
foodservice = Checkbutton(text="Want to prebool your meals? ",variable= foodservicevalue)
foodservice.grid(row=6,column=3)
# ----- Submit button: getvals appends the form values to the records file -----
Button(text="Submit to Tayyab Travels",command=getvals).grid(row=7,column=3)
root.mainloop()  # start the Tk event loop (blocks until the window closes)
| [
"mtayyabmalik99@gmail.com"
] | mtayyabmalik99@gmail.com |
fc29b47a813bb30aeb84be26305c0dd6d6477bca | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_exercises/templates/Python_Hand-on_Solve_200_Problems/Section 17 Recursion/sum_of_list_solution.py | d50c69d3d31130293893aaab17cbfa9b1112274f | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 320 | py | # # To add a new cell, type '# %%'
# # To add a new markdown cell, type '# %% [markdown]'
# # %%
# # Write a Python program to calculate the sum of a list of numbers. (in recursion fashion)
#
# ___ list_sum num_List
# __ le. ? __ 1
# r_ ? 0
# ____
# r_ ? 0 + ? ? 1|
#
# print ? 2, 4, 5, 6, 7
#
#
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.