blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 246 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3031f60d47293d298f6f3794ad9e04eec3fab55e | 142f576417971267515b65581c106c91823d089e | /models/tsp_agent.py | e32f9a03c62062a25be1148cafda8fcb469fabe1 | [] | no_license | neo-pan/TSP-experiment | b2d490597969db6381b51aafeb0b4e6db13538ab | 8ea336a411fda248b2b08e08184ffc63a522b6ed | refs/heads/main | 2023-06-24T17:16:56.555347 | 2021-07-17T10:23:05 | 2021-07-17T10:23:05 | 318,204,935 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,300 | py | from environments.tsp import TSPState
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Any, NamedTuple, Tuple
from torch_geometric.data import Data, Batch
from .encoder import cal_size_list, MLP, GNNEncoder
from .decoder import AttentionDecoder
class AttentionInfoFixed(NamedTuple):
    """
    Context for AttentionModel decoder that is fixed during decoding so can be precomputed/cached
    This class allows for efficient indexing of multiple Tensors at once
    """

    # Encoder output embeddings for every node in the graph.
    node_embeddings: torch.Tensor
    # Graph-level feature already passed through the graph projection layer.
    graph_context_projected: torch.Tensor
    # Pre-projected keys for the decoder's glimpse (multi-head) attention.
    glimpse_key: torch.Tensor
    # Pre-projected values for the decoder's glimpse attention.
    glimpse_val: torch.Tensor
    # Pre-projected keys used to compute the final output logits.
    logit_key: torch.Tensor
class TSPAgent(nn.Module):
    """Attention-based agent for the TSP: a GNN encoder plus an attention decoder.

    The encoder embeds nodes/edges once per instance; decoding then selects one
    node per step, conditioned on a precomputed fixed context
    (:class:`AttentionInfoFixed`) and the current decoding state.
    """

    def __init__(self, args: Any) -> None:
        """Build embedders, encoder, projections and decoder from CLI/config args."""
        super().__init__()
        self.args = args
        self.node_dim = args.node_dim
        self.edge_dim = args.edge_dim
        self.embed_dim = args.embed_dim
        self.num_gnn_layers = args.num_gnn_layers
        self.encoder_num_heads = args.encoder_num_heads
        self.decoder_num_heads = args.decoder_num_heads
        self.bias = args.bias
        self.tanh_clipping = args.tanh_clipping
        self.pooling_method = args.pooling_method
        self.normalization = args.normalization
        self.set_decode_type(args.decode_type)
        # Linear lifts of raw node coordinates / edge attributes into embed space.
        self.node_embedder = nn.Linear(self.node_dim, self.embed_dim)
        self.edge_embedder = nn.Linear(self.edge_dim, self.embed_dim)
        self.encoder = GNNEncoder(
            self.embed_dim,
            self.num_gnn_layers,
            self.encoder_num_heads,
            self.normalization,
            pooling_method=self.pooling_method,
        )
        # Projections used to build the decoder query at each step.
        self.graph_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
        self.step_proj = nn.Linear(self.embed_dim * 2, self.embed_dim, bias=False)
        # Single projection producing glimpse key/value and logit key (chunked in 3).
        self.project_node_embeddings = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=False)
        self.decoder = AttentionDecoder(
            self.embed_dim, self.decoder_num_heads, bias=False, tanh_clipping=self.tanh_clipping
        )
        # Learned stand-in for (first_node, pre_node) context on the very first step.
        self.W_placeholder = nn.Parameter(torch.Tensor(2 * self.embed_dim))
        self.W_placeholder.data.uniform_(-1, 1)  # Placeholder should be in range of activations

    def set_decode_type(self, decode_type: str) -> None:
        """Select node-selection strategy: 'greedy' (argmax) or 'sampling' (multinomial)."""
        assert decode_type in ["greedy", "sampling"]
        self.decode_type = decode_type

    def init_embed(self, data: Batch) -> Batch:
        """Return a copy of `data` with node/edge features embedded into embed space."""
        assert data.pos.size(-1) == self.node_dim
        assert data.edge_attr.size(-1) == self.edge_dim
        x = self.node_embedder(data.pos)
        edge_attr = self.edge_embedder(data.edge_attr)
        d = data.clone()
        d.x = x
        d.edge_attr = edge_attr
        return d

    def precompute_fixed(self, node_embeddings: torch.Tensor, graph_feat: torch.Tensor) -> AttentionInfoFixed:
        """Precompute the decoding context that stays constant across all steps."""
        graph_context = self.graph_proj(graph_feat)
        glimpse_K, glimpse_V, logit_K = self.project_node_embeddings(node_embeddings).chunk(3, dim=-1)
        glimpse_K = glimpse_K.permute(1, 0, 2).contiguous()  # (num_nodes, batch_size, embed_dim)
        glimpse_V = glimpse_V.permute(1, 0, 2).contiguous()  # (num_nodes, batch_size, embed_dim)
        logit_K = logit_K.contiguous()  # (batch_size, num_nodes, embed_dim)
        return AttentionInfoFixed(
            node_embeddings=node_embeddings,
            graph_context_projected=graph_context,
            glimpse_key=glimpse_K,
            glimpse_val=glimpse_V,
            logit_key=logit_K,
        )

    def forward(self, state: TSPState, fixed: AttentionInfoFixed) -> Tuple[torch.Tensor, torch.Tensor]:
        """Run one decoding step: return (selected node indices, log-probabilities)."""
        batch_size, num_nodes, _ = fixed.node_embeddings.shape
        assert state.first_node is None or list(state.first_node.shape) == list(state.pre_node.shape) == [
            batch_size,
            1,
        ], f"{state.first_node.shape}-{state.pre_node.shape}-{batch_size}"
        assert list(state.avail_mask.shape) == [
            batch_size,
            num_nodes,
        ], f"{state.avail_mask.shape}-{[batch_size, num_nodes]}"
        # Transform node features for attention compute
        query = self._make_query(state, fixed.node_embeddings, fixed.graph_context_projected)
        mask = ~state.avail_mask.unsqueeze(1)  # (batch_size, 1, num_nodes)
        log_p = self.decoder(query, fixed.glimpse_key, fixed.glimpse_val, fixed.logit_key, mask)
        # BUGFIX: squeeze(1), not squeeze() -- a bare squeeze() would also drop the
        # batch dimension when batch_size == 1 and break _select_node's shape check.
        selected = self._select_node(log_p, mask.squeeze(1))
        return selected, log_p

    def _make_query(self, state: TSPState, node_embeddings: torch.Tensor, graph_context: torch.Tensor) -> torch.Tensor:
        r"""Build the decoder query from graph context and current step context.

        query: :math:`(L, N, E)` where L is the target sequence length, N is the
        batch size, E is the embedding dimension.
        """
        batch_size, _, embed_dim = node_embeddings.shape
        if state.first_node is not None:
            # Step context = embeddings of the first and previous node, concatenated.
            step_context = self.step_proj(
                node_embeddings.gather(
                    1, torch.cat((state.first_node, state.pre_node), 1)[:, :, None].expand(batch_size, 2, embed_dim),
                ).view(batch_size, -1)
            )
        else:
            # First step: no tour yet, use the learned placeholder instead.
            step_context = self.step_proj(self.W_placeholder.expand(batch_size, -1))
        query = (graph_context + step_context).unsqueeze(0)
        assert list(query.shape) == [1, batch_size, embed_dim], query.shape
        return query.contiguous()

    def _select_node(self, log_p: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
        """Pick the next node per batch item from log-probabilities.

        `mask` is True for infeasible (already-visited) nodes; shape must match
        `log_p` (batch_size, num_nodes). Returns indices of shape (batch_size, 1).
        """
        assert log_p.shape == mask.shape, f"{log_p.shape}, {mask.shape}"
        probs = log_p.exp()
        assert not torch.isnan(probs).any(), "Probs should not contain any nans"
        if self.decode_type == "greedy":
            _, selected = probs.max(1)
            selected = selected.unsqueeze(-1)
            assert not mask.gather(1, selected).any(), "Decode greedy: infeasible action has maximum probability"
        elif self.decode_type == "sampling":
            selected = probs.multinomial(1)
            # Guard against rare numerical issues in multinomial sampling by
            # resampling until a feasible node is drawn.
            while mask.gather(1, selected).any():
                print("Sampled bad values, resampling!")
                print(selected)
                print(mask)
                selected = probs.multinomial(1)
        else:
            assert False, "Unknown decode type"
        return selected
| [
"xh_pan@outlook.com"
] | xh_pan@outlook.com |
ec8437e1981057ad8a88d6aac492756de87b61b6 | dd3e2450105a29f41a471f7b95e4c0bf8869d836 | /x_of_a_kind_of_a_deck_in_cards.py | 61d9545da98bbffdd6e33f5579892d8c0b60a329 | [] | no_license | lgf8704/learn_git | 0b6fc35763dd416fdf174b179442b9313c4005aa | d55b68af33327da30a729c4e3bb184483dfe0bdd | refs/heads/master | 2022-04-25T14:24:03.345598 | 2020-04-28T08:24:59 | 2020-04-28T08:24:59 | 250,287,210 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,201 | py | class Solution:
def hasGroupsSizeX(self, deck: List[int]) -> bool:
# # deck中只有1个元素,返回False
# if len(deck) == 1:
# return False
# # 定义纸牌频数的字典,也可以用
# dic = {}
# for i in deck:
# if i in dic:
# dic[i] += 1
# else:
# dic[i] = 1
# nums = []
# for v in dic.values():
# # 某个元素的个数为1,无法分组,返回False
# if v == 1:
# return False
# nums.append(v)
# min_num = min(nums)
# j = 2
# while j <= min_num:
# for i in nums:
# if i % j == 0:
# continue
# else:
# break
# else:
# return True
# j += 1
# return False
from math import gcd
from functools import reduce
import collections
vals = collections.Counter(deck).values()
return reduce(gcd, vals) >= 2
if __name__ == "__main__":
    # Smoke test: every card count is 2, so the deck splits into pairs -> True.
    # (The original called the method on the class without an instance, which
    # raises TypeError; an instance is required for a plain method.)
    print(Solution().hasGroupsSizeX([1, 2, 3, 4, 4, 3, 2, 1]))
"648008748@qq.com"
] | 648008748@qq.com |
acb8cfdf6755a61c0fe2898ce127dd07a79c5d0f | 49da4d298a14cc0db320d9716b955106d361553b | /engine/gl/window.py | 7d6d9046820560ec1d6449c7668ca5536bb4f908 | [
"Zlib"
] | permissive | alexcher-im/sgemu | d53b458dfe643520cfbf77e5412870953b1c3770 | 228d2bb6bfd13de7f7a0f9a634e765c17ecab960 | refs/heads/master | 2023-08-05T02:17:55.439317 | 2023-07-27T14:36:33 | 2023-07-27T14:49:31 | 306,878,754 | 11 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,811 | py | from glfw.GLFW import *
# import glfw
from OpenGL.GL import *
from .framebuffer import FrameBuffer
import numpy as np
from timeit import default_timer
import time
class WindowFrameBuffer(FrameBuffer):
    """FrameBuffer adapter for the default (window-owned) framebuffer.

    Uses buffer id 0, the OpenGL default framebuffer, and swaps the GLFW
    window buffers on finish() instead of resolving to a texture.
    """

    def __init__(self, window, width, height):
        # NOTE(review): this calls FrameBuffer.__new__ and discards the result --
        # presumably to skip FrameBuffer.__init__'s GL object creation; confirm.
        super(WindowFrameBuffer, self).__new__(WindowFrameBuffer)
        self.width, self.height = width, height
        self.depth_buffer = None
        self.stencil_depth_buffer = None
        self.color_buffers = []
        # Buffer id 0 is the default framebuffer provided by the windowing system.
        self.buffer_id = 0
        self.window = window
        self.set_viewport(width, height)
        self.clear_mask = GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT

    def add_depth_buffer(self, depth_buffer=None):
        # The default framebuffer already has its depth buffer; nothing to attach.
        pass

    def finish(self):
        # Present the rendered frame and clear for the next one.
        glfwSwapBuffers(self.window.window)
        self.clear()

    def __del__(self):
        # Default framebuffer is owned by the window; no GL objects to delete.
        pass
class Window:
    """GLFW window wrapper: creates the GL context, routes input callbacks and
    runs the main render loop."""

    def __init__(self, res=(800, 600), resize=True, title='LearnOpenGL', version=(4, 3), alpha=False,
                 cursor_lock=True, depth_testing=True):
        glfwInit()
        # setting up glfw base: request a core-profile context of the given version
        glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, version[0])
        glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, version[1])
        glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE)
        glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GLFW_TRUE)
        # glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_COMPAT_PROFILE)
        if not resize:
            glfwWindowHint(GLFW_RESIZABLE, GL_FALSE)
        # creating glfw window object
        self.window = glfwCreateWindow(*res, title, None, None)
        self.setup_opengl_context()
        # Wrap the default framebuffer; size comes from the actual framebuffer,
        # which may differ from `res` on high-DPI displays.
        self.framebuffer = WindowFrameBuffer(self, *glfwGetFramebufferSize(self.window))
        # setting callbacks (no-op defaults until the user installs handlers)
        self._user_resize_callback = lambda window, width, height: True
        self._user_key_callback = lambda window, key, scancode, action, mode: True
        self._user_key_array_callback = lambda keys, dt: True
        glfwSetWindowSizeCallback(self.window, self._on_resize)
        glfwSetKeyCallback(self.window, self._on_keyboard_key)
        # Pressed-state table indexed by GLFW key code.
        self.pressed_keys = np.array([False] * 1024, 'bool')
        self.framebuffer.set_clear_color((0.2, 0.3, 0.3, 1.0))
        if depth_testing:
            glEnable(GL_DEPTH_TEST)
        if alpha:
            self.enable_blending()  # move this from this class
        if cursor_lock:
            self.set_cursor_lock()

    def setup_opengl_context(self):
        # Bind this window's GL context to the current thread.
        glfwMakeContextCurrent(self.window)

    def set_key_callback(self, func):
        """
        Func params:
         GLFWwindow window
         int key
         int scancode
         int action
         int mode
        """
        self._user_key_callback = func

    def set_mouse_move_callback(self, func):
        """
        Func params:
         GLFWwindow window
         float xpos
         float ypos
        """
        glfwSetCursorPosCallback(self.window, func)

    def set_mouse_click_callback(self, func):
        """
        Func params:
         GLFWindow window
         int button
         int action
         int mods
        """
        glfwSetMouseButtonCallback(self.window, func)

    def set_resize_callback(self, func):
        """
        Func params:
         GLFWwindow window
         int width
         int height
        """
        self._user_resize_callback = func

    def set_key_array_handler(self, func):
        """
        Func params:
         ndarray keys
         float delta_time
        """
        self._user_key_array_callback = func

    def set_scroll_callback(self, func):
        """
        Func params:
         GLFWwindow window
         float xoffset
         flaot yoffset
        """
        glfwSetScrollCallback(self.window, func)

    def _on_resize(self, window, width, height):
        # Keep the framebuffer's size/viewport in sync before notifying the user.
        self.framebuffer.width, self.framebuffer.height = width, height
        self.framebuffer.set_viewport(self.framebuffer.width, self.framebuffer.height)
        self._user_resize_callback(window, width, height)

    def _on_keyboard_key(self, window, key, scancode, action, mode):
        # Track press/release state, then forward the raw event to the user callback.
        if action == GLFW_PRESS:
            self.pressed_keys[key] = True
        elif action == GLFW_RELEASE:
            self.pressed_keys[key] = False
        self._user_key_callback(window, key, scancode, action, mode)

    @staticmethod
    def enable_blending():
        # Standard alpha blending: src*alpha + dst*(1-alpha).
        glEnable(GL_BLEND)
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)

    def set_cursor_lock(self, mode=True):
        # Disabled cursor mode hides the cursor and gives unbounded motion (FPS-style).
        glfwSetInputMode(self.window, GLFW_CURSOR, GLFW_CURSOR_DISABLED if mode else GLFW_CURSOR_NORMAL)

    def poll_key_array(self):
        # Invoke the key-array handler with a fixed timestep.
        self._user_key_array_callback(self.pressed_keys, 1/60)  # todo remove delta time

    def run_loop(self, draw_func, fps=60):
        """Run the main loop, calling draw_func(delta_time) each frame.

        fps <= 0 disables sleeping (uncapped frame rate).
        """
        # setting timing values
        delay = 1 / fps if fps > 0 else 1.0
        # adding global vars to local scope (micro-optimization: LOAD_FAST in the loop)
        sleep = time.sleep if fps > 0 else lambda t: None
        timer = default_timer
        max_func = max
        window = self.window
        should_close = glfwWindowShouldClose
        poll_events = glfwPollEvents
        poll_key_array_func = self._user_key_array_callback
        keys_array = self.pressed_keys
        swap_buffers = glfwSwapBuffers
        # main drawing loop
        start = timer()
        while not should_close(window):
            delta_time = timer() - start
            start = timer()
            # polling events and drawing everything
            poll_events()
            poll_key_array_func(keys_array, delta_time)
            self.framebuffer.clear()
            draw_func(delta_time)
            swap_buffers(window)
            print('time: %.5fms' % ((timer() - start) * 1000), flush=True)
            # Sleep off whatever remains of the frame budget (never negative).
            sleep(max_func(delay - timer() + start, 0))
"43251717+alexcher-im@users.noreply.github.com"
] | 43251717+alexcher-im@users.noreply.github.com |
fd64311b77ca6f99738abb183a2ed5456138c7f6 | ea315f59c71092acbbc7cc616b4f162159a93cbf | /comm/urls.py | 55cb5aaf0d0bb564ebc83854827e2b9ef51d7462 | [] | no_license | comm833/comm833.github.io | f29f67803d7875732761b6db2f06d81dfde61207 | ad8a610e738137a255ef89a65e168389e017f580 | refs/heads/master | 2023-07-14T04:29:04.689638 | 2021-09-02T20:24:36 | 2021-09-02T20:24:36 | 402,210,079 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 950 | py | """comm URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
    # Django admin interface.
    path('admin/', admin.site.urls),
    # All remaining routes are delegated to the accounts app.
    path('', include('accounts.urls')),
]
# Serve user-uploaded media through Django; static() only emits patterns in
# DEBUG mode, so this is a development-only convenience.
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
"adekomiadeleke@gmail.com"
] | adekomiadeleke@gmail.com |
5147f4ab1d3b2ba6954742a551d29532254dc5ba | 0edfa42770e780a72cf48676742224eba710384d | /apac/records/mappings/v6/__init__.py | 752d718cdcef87ff2a70908a9d8b16c0f65c5667 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | kennedy-cook/apac-invenio | 214d8619f5153339efcf8f42e29b132e402ec305 | 5179ca82be35f739ee886e1fd6426ae83fa3c278 | refs/heads/master | 2020-07-09T17:23:38.165963 | 2019-07-31T17:16:34 | 2019-07-31T17:16:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 288 | py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2019 IBM.
#
# APAC is free software; you can redistribute it and/or modify it under the
# terms of the MIT License; see LICENSE file for more details.
"""Mappings for Elasticsearch 5.x."""
from __future__ import absolute_import, print_function
| [
"adam.holliday@ibm.com"
] | adam.holliday@ibm.com |
62d6c486fb55ac6e7bb730bfcee37ed980f8fd83 | 5280c91e59754c12b3c94838c1bf47b926318abe | /upload/src/main/mc/utils/Utils.py | 1996214a63d84532158324781b1e02a570fc0811 | [] | no_license | nguyennhatminh-mgr/MC-Assignment3 | ecfecfc8052501207583776a99a396eedc67b7f1 | 377099f96de79823d16dbbeca0929e62fa45a2c8 | refs/heads/master | 2022-12-22T00:32:28.012051 | 2020-09-23T14:58:04 | 2020-09-23T14:58:04 | 298,004,785 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py |
class Utils:
    def lookup(self, name, lst, func):
        """Return the first element of `lst` for which func(element) == name.

        Returns None when no element matches.
        """
        return next((item for item in lst if func(item) == name), None)
| [
"nguyennhatminh12a1@gmail.com"
] | nguyennhatminh12a1@gmail.com |
a5360d5373b2aad2939d91d59a32d5f0daa935fe | 46a1cf8dde43ca7401f5b836263627a6009f10f0 | /pylibrets/__init__.py | b271550f342629c9b1053823fe1615fd280aceb2 | [
"MIT"
] | permissive | wkdglitch/pylibrets | abb059fa78cb7e9270a0de32ff42a307e964d29a | ee7ae95a9a7c8809c4bc6d269c52518d672cbf82 | refs/heads/master | 2021-01-19T02:25:18.796056 | 2016-06-19T03:37:50 | 2016-06-19T03:37:50 | 50,259,058 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,629 | py | __title__ = 'pylibRETS'
__version__ = '0.1.0'
Version = __version__ # for backware compatibility
import requests
from xml.etree import ElementTree
#from urllib import urlparse, urljoin
from urlparse import urlparse, urljoin
import socket
import hashlib
import time
import urllib
from .models import (
MetadataResource, MetadataClass, MetadataTable, MetadataLookup,
MetadataLookupType, MetadataObject, LoginResponse
)
from .exceptions import (
LoginException, GetObjectException, SearchException,
GetMetadataException, NoLoginException, RetsException
)
from .meta_parser import MetadataParser, StandardXmlMetadataParser
from .rets_parser import SearchResultSet, CompactResultSetParser
# Supported RETS protocol version identifiers.
RETS_1_0 = "RETS/1.0"
RETS_1_5 = "RETS/1.5"
RETS_1_7 = "RETS/1.7"
RETS_1_7_2 = "RETS/1.7.2"
RETS_1_8 = "RETS/1.8"
RETS_1_8_0 = "RETS/1.8.0"

# Search result data formats.
COMPACT = "COMPACT"
COMPACT_DECODED = "COMPACT-DECODED"
STANDARD_XML = "STANDARD-XML"

# Values for the search Count parameter.
NO_COUNT = 0
INCLUDE_COUNT = 1
COUNT_ONLY = 2

# HTTP header names used by the RETS protocol.
HTTP_EXPECT_HEADER = "Expect"
RETS_REQUEST_ID_HEADER = "RETS-Request-ID"
RETS_SESSION_ID_HEADER = "RETS-Session-ID"
RETS_SERVER_HEADER = "RETS-Server"
RETS_VERSION_HEADER = "RETS-Version"
RETS_UA_AUTH_HEADER = "RETS-UA-Authorization"
# NOTE(review): the next two names duplicate definitions above with identical
# values; harmless but redundant.
RETS_REQUEST_ID_HEADER = "RETS-Request-ID"
RETS_SESSION_ID_HEADER = "RETS-Session-ID"
RETS_USER_AGENT_HEADER = "User-Agent"

# XML encodings a RETS server may respond with.
RETS_XML_DEFAULT_ENCODING = "US-ASCII"
RETS_XML_ISO_ENCODING = "iso-8859-1"
RETS_XML_UTF8_ENCODING = "UTF-8"

# Defaults applied when the caller does not override them.
DEFAULT_USER_AGENT = "%s/%s" % (__title__, __version__)
DEFAULT_RETS_VERSION = RETS_1_5

# Parameter names for the RETS Search transaction.
SEARCH_REQUEST_FORMAT_PARAMETER = "Format"
SEARCH_REQUEST_STANDARD_NAMES_PARAMETER = "StandardNames"
SEARCH_REQUEST_QUERY_TYPE_PARAMETER = "QueryType"
SEARCH_REQUEST_SEARCH_TYPE_PARAMETER = "SearchType"
SEARCH_REQUEST_CLASS_PARAMETER = "Class"
SEARCH_REQUEST_QUERY_PARAMETER = "Query"
SEARCH_REQUEST_SELECT_PARAMETER = "Select"
SEARCH_REQUEST_COUNT_PARAMETER = "Count"
SEARCH_REQUEST_LIMIT_PARAMETER = "Limit"
SEARCH_REQUEST_OFFSET_PARAMETER = "Offset"
SEARCH_REQUEST_RESTRICTED_PARAMETER = "RestrictedIndicator"
SEARCH_REQUEST_PAYLOAD_PARAMETER = "Payload"
class RetsSession(object):
    """Stateful RETS client session (Python 2 syntax: `except Exception, e`).

    Wraps a requests.Session with RETS login, user-agent authentication,
    GetObject, GetMetadata, Search and Logout transactions.
    """

    def __init__(self, login_url, user, passwd, user_agent = None, user_agent_passwd = None, rets_version = DEFAULT_RETS_VERSION):
        self.rets_ua_authorization = None
        self.user = user
        self.passwd = passwd
        # NOTE(review): the default user_agent=None would make len() raise a
        # TypeError here -- callers appear to always pass a string; confirm.
        self.user_agent = DEFAULT_USER_AGENT if len(user_agent) == 0 else user_agent
        self.user_agent_passwd = user_agent_passwd
        self.rets_version = rets_version
        self.base_url = self._get_base_url(login_url)
        self.login_url = login_url
        self._session = None
        self.logged_in = False
        self.rets_error = None
        # Capability URLs and server metadata filled in by Login().
        self.server_info = None
        self.detected_rets_version = None
        self.result_format = COMPACT_DECODED
        self.result_count = INCLUDE_COUNT
        self.debug = DEFAULT_USER_AGENT

    def __del__(self):
        # Best-effort logout when the session object is garbage collected.
        self.Logout()
        self._session = None

    def _set_rets_ua_authorization(self):
        # Attach the precomputed RETS-UA-Authorization digest to every request.
        self._session.headers[RETS_UA_AUTH_HEADER] = self.rets_ua_authorization;

    def _calculate_rets_ua_authorization(self, sid, user_agent, user_agent_passwd, rets_version):
        """Compute the RETS user-agent authorization digest.

        Digest = MD5(MD5(product:ua_password):request_id:session_id:rets_version).
        """
        product = user_agent
        #a1hashed = hashlib.md5(bytes(product + ':' + user_agent_passwd, 'utf-8')).hexdigest()
        a1hashed = hashlib.md5(bytes(product + ':' + user_agent_passwd)).hexdigest()
        retsrequestid = ''
        retssessionid = sid
        #digestHash = hashlib.md5(bytes(a1hashed + ':' + retsrequestid + ':' + retssessionid + ':' + rets_version, 'utf-8')).hexdigest()
        digestHash = hashlib.md5(bytes(a1hashed + ':' + retsrequestid + ':' + retssessionid + ':' + rets_version)).hexdigest()
        return 'Digest ' + digestHash

    def _get_code_text(self, response_xml):
        # Extract ReplyCode/ReplyText attributes from a RETS response root element.
        xml_obj = ElementTree.fromstring(response_xml)
        reply_code = xml_obj.attrib['ReplyCode']
        reply_text = xml_obj.attrib['ReplyText']
        return reply_code, reply_text

    def _get_base_url(self, url_str):
        # Reduce a full URL to scheme://host[:port].
        url_parts = urlparse(url_str)
        resURL = url_parts.scheme + "://" + url_parts.netloc
        return resURL

    def _parse_login_response(self, login_resp):
        """Parse the login response body into a dict of key=value server info."""
        reply_code, reply_text = self._get_code_text(login_resp)
        if reply_code != '0':
            raise LoginException(reply_code + "," + reply_text)
        login_xml = ElementTree.fromstring(login_resp)
        if len(login_xml) > 0:
            rets_info = login_xml[0].text.split('\n')
        else:
            # for servers which don't have RETS-RESPONSE node
            rets_info = login_xml.text.split('\n')
        rets_info_dict = {}
        for info_item in rets_info:
            if info_item.strip():
                key_value_pair = info_item.split('=')
                rets_info_dict[key_value_pair[0].strip()] = key_value_pair[1].strip()
        return rets_info_dict

    def _parse_getobject_response(self, response):
        # Raise when a GetObject reply carries a non-zero RETS reply code.
        reply_code, reply_text = self._get_code_text(response)
        if reply_code != '0':
            raise GetObjectException(reply_code + "," + reply_text)

    def _parse_search_response(self, response):
        # Raise when a Search reply is empty or carries a non-zero reply code.
        if not response:
            raise SearchException('Empty response')
        reply_code, reply_text = self._get_code_text(response)
        if reply_code not in ['0']:
            raise SearchException(reply_code + "," + reply_text)

    def _parse_getmetadata_response(self, response):
        # Raise when a GetMetadata reply carries a non-zero RETS reply code.
        reply_code, reply_text = self._get_code_text(response)
        if reply_code != '0':
            raise GetMetadataException(reply_code + "," + reply_text)

    def _get_object(self, obj_type, resource , obj_id):
        """Fetch one media object; returns raw bytes of the response body."""
        if self.user_agent_passwd:
            self._set_rets_ua_authorization()
        getobject_url = urljoin(self.base_url, self.server_info['GetObject'])
        getobject_response = self._session.get(getobject_url + "?Type=%s&Resource=%s&ID=%s" % (obj_type, resource, obj_id))
        getobject_response.raise_for_status()
        # A text/plain body signals a RETS error payload rather than object data.
        if getobject_response.headers['content-type'] == 'text/plain':
            self._parse_getobject_response(getobject_response.text)
        return getobject_response.content

    def GetLoginUrl(self):
        # Accessor for the login URL this session was created with.
        return self.login_url

    def Login(self):
        """Authenticate against the RETS server; returns True on success.

        On failure the exception message is stored in self.rets_error.
        """
        try:
            self._session = requests.session()
            headers = {'Accept': "*/*",
                       RETS_USER_AGENT_HEADER: self.user_agent,
                       RETS_VERSION_HEADER: self.rets_version}
            if self.user_agent_passwd:
                # Pre-login UA auth uses an empty session id.
                headers[RETS_UA_AUTH_HEADER] = self._calculate_rets_ua_authorization(
                    ''
                    , self.user_agent
                    , self.user_agent_passwd
                    , self.rets_version)
            self._session.headers = headers
            self._session.auth = requests.auth.HTTPDigestAuth(self.user, self.passwd)
            response = self._session.get(self.login_url)
            response.raise_for_status()
            self.server_info = self._parse_login_response(response.text)
            self.server_info[RETS_SERVER_HEADER] = response.headers[RETS_SERVER_HEADER]
            self.server_info[RETS_VERSION_HEADER] = response.headers[RETS_VERSION_HEADER]
            if self.user_agent_passwd:
                # Recompute UA auth with the real session id from the login cookie.
                self.rets_ua_authorization = self._calculate_rets_ua_authorization(
                    response.cookies[RETS_SESSION_ID_HEADER]
                    , self.user_agent
                    , self.user_agent_passwd
                    , self.rets_version)
            self.logged_in = True
        except Exception, e:
            self.rets_error = e.message
        return self.logged_in

    def GetLoginResponse(self):
        # Not implemented.
        pass

    def GetCapabilityUrls(self):
        # Not implemented.
        pass

    def GetAction(self):
        # Not implemented.
        pass

    def CreateSearchRequest(self):
        # Not implemented.
        pass

    def Logout(self):
        """Terminate the session on the server; errors land in self.rets_error."""
        try:
            if not self.logged_in:
                raise NoLoginException("You are not logged in")
            if self.user_agent_passwd:
                self._set_rets_ua_authorization()
            logout_url = urljoin(self.base_url, self.server_info['Logout'])
            logout_response = self._session.get(logout_url)
            logout_response.raise_for_status()
        except Exception, e:
            self.rets_error = e.message

    def IsLoggedIn(self):
        # Lazily log in on first use.
        if not self.logged_in:
            self.Login()
        return self.logged_in

    def Test(self):
        # Connectivity check: a full login/logout round trip.
        if self.Login():
            self.Logout()
            return True
        else:
            return False

    def GetObject(self, obj_type, resource , obj_id):
        """Fetch a media object, retrying up to 3 times on socket timeouts."""
        if not self.logged_in:
            raise NoLoginException("You need to call login before getobject")
        for i in range(3):
            try:
                return self._get_object(obj_type, resource , obj_id)
            except socket.timeout:
                if i < 3:
                    print('timeout, try again')
                    time.sleep(5)
                else:
                    raise
    def GetMetadata(self):
        """Download the full STANDARD-XML metadata tree and return a parser for it."""
        if not self.logged_in:
            raise NoLoginException("You need to call login before getmetadata")
        if self.user_agent_passwd:
            self._set_rets_ua_authorization()
        get_meta_url = urljoin(self.base_url, self.server_info['GetMetadata'])
        response = self._session.get(get_meta_url + '?Type=METADATA-SYSTEM&ID=*&Format=STANDARD-XML')
        response.raise_for_status()
        self._parse_getmetadata_response(response.text)
        return StandardXmlMetadataParser(response.text)

    def Search(self, resource, search_class, query, select = None, limit = None, offset = None):
        """Run a DMQL2 search and return a compact result-set parser.

        NOTE(review): `if limit: limit = 'NONE'` overwrites any caller-supplied
        limit with 'NONE' (no limit) -- the condition looks inverted; confirm.
        """
        if not self.logged_in:
            raise NoLoginException("You need to call login before search")
        if self.user_agent_passwd:
            self._set_rets_ua_authorization()
        if limit:
            limit = 'NONE'
        params = {}
        params.setdefault(SEARCH_REQUEST_SEARCH_TYPE_PARAMETER, resource)
        params.setdefault(SEARCH_REQUEST_CLASS_PARAMETER, search_class)
        params.setdefault(SEARCH_REQUEST_QUERY_PARAMETER, query)
        params.setdefault(SEARCH_REQUEST_QUERY_TYPE_PARAMETER, 'DMQL2')
        params.setdefault(SEARCH_REQUEST_STANDARD_NAMES_PARAMETER, '0')
        params.setdefault(SEARCH_REQUEST_COUNT_PARAMETER, self.result_count)
        params.setdefault(SEARCH_REQUEST_FORMAT_PARAMETER, self.result_format)
        params.setdefault(SEARCH_REQUEST_LIMIT_PARAMETER, limit)
        if offset is not None:
            params.setdefault(SEARCH_REQUEST_OFFSET_PARAMETER, offset)
        if select is not None:
            params.setdefault(SEARCH_REQUEST_SELECT_PARAMETER, select)
        search_url = urljoin(self.base_url, self.server_info['Search'])
        search_response = self._session.post(search_url, params)
        search_response.raise_for_status()
        self._parse_search_response(search_response.text)
        return CompactResultSetParser(search_response.text)
| [
"fernandoherrera.adm@gmail.com"
] | fernandoherrera.adm@gmail.com |
eafd52d1be93441c568fdd4602cc7c11d0b12704 | 6b8e14157141e411540f6fbfc2e314be18a1ab08 | /app.py | a7bb036e6330eb4fc0af6b2816c997f9c130a97e | [] | no_license | mailup/rest-samples-python | 7ca66095779f579e004d6090fa5f55b3d3da4218 | 7bb792637626856529ee2172a46ab2bb8a122a95 | refs/heads/master | 2020-04-18T07:48:44.640725 | 2019-01-25T16:51:33 | 2019-01-25T16:51:33 | 167,373,062 | 1 | 1 | null | 2019-01-25T15:15:23 | 2019-01-24T13:35:07 | null | UTF-8 | Python | false | false | 3,745 | py | import os
from flask import Flask, render_template, request, make_response
from mail_up_client import MailUpClient, MailUpException
from examples import example_names
app = Flask(__name__)
app.config.from_object(os.environ.get('APP_SETTINGS', 'config.Config'))
mail_up = MailUpClient(config=app.config)
example_results = [None for x in range(len(example_names))]
example_errors = [None for x in range(len(example_names))]
def login():
    """Exchange the posted username/password for a MailUp access token.

    On success the cached example results/errors are reset. On failure the
    exception is deliberately swallowed and the session stays unauthorized.
    """
    global example_errors, example_results
    try:
        mail_up.retrieve_access_token(
            request.form.get('username'),
            request.form.get('password'),
        )
    except MailUpException:
        return
    example_results = [None] * len(example_names)
    example_errors = [None] * len(example_names)
def login_with_code():
    """Exchange the OAuth authorization code (query string) for an access token.

    On success the cached example results/errors are reset. On failure the
    exception is deliberately swallowed and the session stays unauthorized.
    """
    global example_errors, example_results
    try:
        mail_up.retrieve_access_token_with_code(request.args.get('code'))
    except MailUpException:
        return
    example_results = [None] * len(example_names)
    example_errors = [None] * len(example_names)
def refresh_token():
    # Trade the stored refresh token for a fresh access token.
    mail_up.refresh_access_token()
@app.before_request
def before_request():
    # Restore OAuth tokens from the request cookies; clear them once expired.
    mail_up.load_token(cookies=request.cookies)
    if mail_up.get_token_time() <= 0:
        mail_up.clear_tokens()
@app.after_request
def after_request(response):
    # Persist the current tokens back into cookies so the next request can
    # restore them in before_request.
    if mail_up.access_token:
        response.set_cookie('access_token', mail_up.access_token)
        response.set_cookie('refresh_token', mail_up.refresh_token)
        response.set_cookie('token_time', str(mail_up.token_time))
    return response
@app.route('/', methods=['GET', 'POST'])
def index():
    """Single-page console: handles login flows, raw API calls and examples."""
    execute_result = None
    # Dispatch on which form button was submitted (or the OAuth redirect code).
    if request.form.get('logon_by_password'):
        login()
    elif request.args.get('code'):
        login_with_code()
    elif request.form.get('refresh_token'):
        refresh_token()
    elif request.form.get('logon_by_key'):
        # Redirect the browser into the OAuth logon flow.
        return mail_up.logon()
    elif request.form.get('execute_request'):
        # Free-form API call built from the submitted form fields.
        uri = request.form.get('url') + request.form.get('endpoint')
        try:
            execute_result = mail_up.call_method(
                method=request.form.get('method'),
                content_type=request.form.get('content_type'),
                url=uri,
                body=request.form.get('body'),
            )
        except MailUpException as e:
            # Deliberate best-effort: a failed call leaves execute_result None.
            pass
    # Run any example whose "run" button was submitted; errors are captured
    # per-example so the page can render them next to the result.
    for number, name in enumerate(example_names):
        if request.form.get(f'run_example_{number + 1}'):
            method_to_call = getattr(mail_up, f'example_{number + 1}')
            # Placeholder in case the call raises before assigning a result.
            example_results[number] = [True]
            try:
                example_results[number] = method_to_call()
            except MailUpException as e:
                example_errors[number] = dict()
                example_errors[number]['code'] = e.code
                example_errors[number]['message'] = e.error
                example_errors[number]['url'] = mail_up.error_url
    authorization_status = 'Authorized' if mail_up.access_token else 'Unauthorized'
    resp = make_response(
        render_template(
            'index.html',
            authorization_status=authorization_status,
            access_token=mail_up.access_token,
            token_time=mail_up.get_token_time(),
            execute_result=execute_result,
            example_results=example_results,
            example_errors=example_errors,
            endpoints={
                'Console': mail_up.console_endpoint,
                'MailStatistics': mail_up.mail_statistics_endpoint,
            },
            examples=example_names,
        )
    )
    return resp
if __name__ == '__main__':
    # Run Flask's built-in development server (not for production use).
    app.run()
| [
"vladimir.khramkov@azati.com"
] | vladimir.khramkov@azati.com |
9836a0216936a52972c37b5425d466bc4f03d472 | c9436a4f14ec6b3c86cf0f69d82afacda9d6c7f3 | /geni/request.py | 83bb12970653fb48115341cd13f958efeb961e67 | [] | no_license | KDahlgren/ceph-deploy-popper | a04e8785e8d7c086c2940e91e3e2a85c4c059f5d | bdff9766c170132433e53a0799dd38f0e3b4a4d2 | refs/heads/master | 2020-06-29T02:02:09.796534 | 2019-08-06T05:51:49 | 2019-08-06T05:51:49 | 200,405,247 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,452 | py | import os
from geni.aggregate import cloudlab
from geni.rspec import pg
from geni import util
from random import randint
def baremetal_node(name, img, hardware_type):
    """Build a raw-PC request node with the given disk image and hardware type."""
    pc = pg.RawPC(name)
    pc.disk_image = img
    pc.hardware_type = hardware_type
    return pc
# Randomized slice name so repeated runs don't collide.
experiment_name = 'popper-test' + str( randint( 0, 9999) )

# Disk image used for every node in the experiment.
img = "urn:publicid:IDN+wisconsin.cloudlab.us+image+emulab-ops//UBUNTU18-64-STD"
#img = "urn:publicid:IDN+wisconsin.cloudlab.us+image+emulab-ops//ceph-deploy-4osds"

# Assemble the resource request: one client and two OSD nodes, all c220g5.
request = pg.Request()
request.addResource(baremetal_node("client0", img, 'c220g5'))
request.addResource(baremetal_node("osd0", img, 'c220g5'))
request.addResource(baremetal_node("osd1", img, 'c220g5'))
#request.addResource(baremetal_node("osd2", img, 'c220g5'))
#request.addResource(baremetal_node("osd3", img, 'c220g5'))

# load context (GENI credentials; passphrase comes from the environment)
ctx = util.loadContext(key_passphrase=os.environ['GENI_KEY_PASSPHRASE'])

# create slice
util.createSlice(ctx, experiment_name, renew_if_exists=True)

# create sliver on emulab
manifest = util.createSliver(ctx, cloudlab.Wisconsin, experiment_name, request)

# grouping inventory (Ansible group -> node names)
groups = {
  #'osds': ['osd0','osd1','osd2','osd3'],
  'osds': ['osd0','osd1'],
  'clients': ['client0']
}

# output files: ansible inventory and GENI manifest, written next to this script
outdir = os.path.dirname(os.path.realpath(__file__))
util.toAnsibleInventory(manifest, groups=groups, hostsfile=outdir+'/hosts')
manifest.writeXML(outdir+'/manifest.xml')
| [
"kmdahlgr@ucsc.edu"
] | kmdahlgr@ucsc.edu |
a889ec28ea503f4ebfa36d0e080c67063defabd2 | 2737099c5b3da8df1736364902889ba5fec08beb | /level_four/learning_templates/basic_app/views.py | 4184a153ced9d7973a8fb3953910783d1ad7bf0c | [] | no_license | sidharth01g/LearnDjango | f0116286344b75916d4c6f6c0815f77791a2283e | 7260cbae267e91bece694aabe21458f62264173e | refs/heads/master | 2020-03-23T18:31:22.214366 | 2018-08-18T12:36:54 | 2018-08-18T12:36:54 | 141,914,412 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 679 | py | from django.shortcuts import render
from django.http import HttpResponse, HttpRequest
# Create your views here.
def index(request: HttpRequest) -> HttpResponse:
context = {
'text': 'hello world!',
'number': 1234,
}
return render(request=request, template_name='basic_app/index.html', context=context)
def other(request: HttpRequest) -> HttpResponse:
context = {
}
return render(request=request, template_name='basic_app/other.html', context=context)
def relative(request: HttpRequest) -> HttpResponse:
context = {
}
return render(request=request, template_name='basic_app/relative_url_templates.html', context=context)
| [
"sidharth01g@gmail.com"
] | sidharth01g@gmail.com |
f35a5128a2f4e22df0707fae7e9f4bde1f798c9a | 1de2977d733a05ed08c16792ab5e9735e81b2fcd | /probono_main/urls.py | ad5e4fb866f8c99f0521e163c9619abd8fb9295b | [] | no_license | dchouzer/ProBono | a04505c4d00a4a3f3209f7b045f268cb171404bc | e0acdc9ea488141f5e2f18ac5b41cd0135ac0184 | refs/heads/master | 2016-09-11T09:21:41.403041 | 2015-03-06T04:43:03 | 2015-03-06T04:43:03 | 31,639,293 | 0 | 0 | null | 2015-03-04T05:42:21 | 2015-03-04T05:14:17 | null | UTF-8 | Python | false | false | 281 | py | from django.conf.urls import patterns, include, url
from django.contrib import admin
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'probono_main.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
)
| [
"david.p.chou@duke.edu"
] | david.p.chou@duke.edu |
aad2c3ec378cad1fce5bd7264d58838ff5d417ad | a57f55fb1ebc044342698a903acb114397d8e673 | /REMproxy/website/rem_proxy/urls.py | 7cc541e5673ccd23c6c71da8aba7136f4ebebc1c | [] | no_license | ekremcet/REMProxyServer | 4be22372061d96241b2afc32461f04e0806bc268 | 5c44f47002dddad3400e937712dd8e84ff6924f0 | refs/heads/master | 2020-05-04T17:59:20.992219 | 2019-05-18T11:55:23 | 2019-05-18T11:55:23 | 179,334,879 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 207 | py | from django.contrib import admin
from django.urls import path, include
from django.views.generic import TemplateView
from django.conf.urls import url
urlpatterns = [
path('', include('pages.urls')),
]
| [
"ekrem.cetinkaya@ozu.edu.tr"
] | ekrem.cetinkaya@ozu.edu.tr |
06df66d95f83dce16f1a62cdb9bfc29ba275e9f5 | 208760460898e0280fe33bd240293cbe7e147028 | /manimlib/mobject/svg/brace.py | 24605570db36432ce408eb9952c288d5c3313d4e | [] | no_license | Surya291/Video_animations_using_python | cba67edf6648911a8cd7a0ec86afd29133c09cc7 | ffead6aada5b17e664a97b5e8773683794a2e1a7 | refs/heads/master | 2022-12-20T03:23:43.163327 | 2020-06-30T18:27:40 | 2020-06-30T18:27:40 | 276,082,631 | 1 | 1 | null | 2022-11-28T01:26:05 | 2020-06-30T11:42:16 | Python | UTF-8 | Python | false | false | 4,787 | py | import numpy as np
from manimlib.animation.composition import AnimationGroup
from manimlib.constants import *
from manimlib.animation.fading import FadeIn
from manimlib.animation.growing import GrowFromCenter
from manimlib.mobject.svg.tex_mobject import TexMobject
from manimlib.mobject.svg.tex_mobject import TextMobject
from manimlib.mobject.types.vectorized_mobject import VMobject
from manimlib.utils.config_ops import digest_config
from manimlib.utils.space_ops import get_norm
import copy
class Brace(TexMobject):
CONFIG = {
"buff": 0.2,
"width_multiplier":2 ,
"max_num_quads": 15,
"min_num_quads": 0,
"background_stroke_width": 0,
}
def __init__(self, mobject, direction=DOWN, **kwargs):
digest_config(self, kwargs, locals())
angle = -np.arctan2(*direction[:2]) + np.pi
mobject.rotate(-angle, about_point=ORIGIN)
left = mobject.get_corner(DOWN + LEFT)
right = mobject.get_corner(DOWN + RIGHT)
target_width = right[0] - left[0]
# Adding int(target_width) qquads gives approximately the right width
num_quads = np.clip(
int(self.width_multiplier * target_width),
self.min_num_quads, self.max_num_quads
)
tex_string = "\\underbrace{%s}" % (num_quads * "\\qquad")
TexMobject.__init__(self, tex_string, **kwargs)
self.tip_point_index = np.argmin(self.get_all_points()[:, 1])
self.stretch_to_fit_width(target_width)
self.shift(left - self.get_corner(UP + LEFT) + self.buff * DOWN)
for mob in mobject, self:
mob.rotate(angle, about_point=ORIGIN)
def put_at_tip(self, mob, use_next_to=True, **kwargs):
if use_next_to:
mob.next_to(
self.get_tip(),
np.round(self.get_direction()),
**kwargs
)
else:
mob.move_to(self.get_tip())
buff = kwargs.get("buff", DEFAULT_MOBJECT_TO_MOBJECT_BUFFER)
shift_distance = mob.get_width() / 2.0 + buff
mob.shift(self.get_direction() * shift_distance)
return self
def get_text(self, *text, **kwargs):
text_mob = TextMobject(*text)
self.put_at_tip(text_mob, **kwargs)
return text_mob
def get_tex(self, *tex, **kwargs):
tex_mob = TexMobject(*tex)
self.put_at_tip(tex_mob, **kwargs)
return tex_mob
def get_tip(self):
# Very specific to the LaTeX representation
# of a brace, but it's the only way I can think
# of to get the tip regardless of orientation.
return self.get_all_points()[self.tip_point_index]
def get_direction(self):
vect = self.get_tip() - self.get_center()
return vect / get_norm(vect)
class BraceLabel(VMobject):
CONFIG = {
"label_constructor": TexMobject,
"label_scale": 1,
}
def __init__(self, obj, text, brace_direction=DOWN, **kwargs):
VMobject.__init__(self, **kwargs)
self.brace_direction = brace_direction
if isinstance(obj, list):
obj = VMobject(*obj)
self.brace = Brace(obj, brace_direction, **kwargs)
if isinstance(text, tuple) or isinstance(text, list):
self.label = self.label_constructor(*text, **kwargs)
else:
self.label = self.label_constructor(str(text))
if self.label_scale != 1:
self.label.scale(self.label_scale)
self.brace.put_at_tip(self.label)
self.submobjects = [self.brace, self.label]
def creation_anim(self, label_anim=FadeIn, brace_anim=GrowFromCenter):
return AnimationGroup(brace_anim(self.brace), label_anim(self.label))
def shift_brace(self, obj, **kwargs):
if isinstance(obj, list):
obj = VMobject(*obj)
self.brace = Brace(obj, self.brace_direction, **kwargs)
self.brace.put_at_tip(self.label)
self.submobjects[0] = self.brace
return self
def change_label(self, *text, **kwargs):
self.label = self.label_constructor(*text, **kwargs)
if self.label_scale != 1:
self.label.scale(self.label_scale)
self.brace.put_at_tip(self.label)
self.submobjects[1] = self.label
return self
def change_brace_label(self, obj, *text):
self.shift_brace(obj)
self.change_label(*text)
return self
def copy(self):
copy_mobject = copy.copy(self)
copy_mobject.brace = self.brace.copy()
copy_mobject.label = self.label.copy()
copy_mobject.submobjects = [copy_mobject.brace, copy_mobject.label]
return copy_mobject
class BraceText(BraceLabel):
CONFIG = {
"label_constructor": TextMobject
}
| [
"surya020901@gmail.com"
] | surya020901@gmail.com |
68a9a3db891fe5ddad7993a219e500c9af72b10f | 169568bfed14d62011a76a92a1bff32e197ae266 | /rango/migrations/0003_auto_20160208_2113.py | 2d31bf0a8195531235806d7aa62b62417222d80b | [] | no_license | sapit/tango_with_django | 57c6eb4a8c7027409c7728b13a1db682b5b57378 | 9308684abc39b63e2faa75bb7d77927ac5a55dc2 | refs/heads/master | 2021-01-10T04:00:15.297981 | 2016-03-11T16:05:17 | 2016-03-11T16:05:17 | 50,595,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 614 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('rango', '0002_auto_20160208_2015'),
]
operations = [
migrations.AddField(
model_name='category',
name='slug',
field=models.SlugField(default=''),
preserve_default=False,
),
migrations.AlterField(
model_name='page',
name='category',
field=models.ForeignKey(related_name=b'pages', to='rango.Category'),
),
]
| [
"zdravkozzz13@gmail.com"
] | zdravkozzz13@gmail.com |
a437ef54c29f588168f8b297bc757e58949b15ab | 3f2c0e7806a89f4a58b273e9dc1dd66f4e9ea7ef | /evaluation/models.py | 030c878eaa0671fb64fcf1e660cdb4176727ea84 | [] | no_license | blaircalderwood/masterWebApp | 45b7d73a7e1082de04240a49e11b9009f47af594 | 2dee7fd8eeb24831e3f7d6d2e08b621ec091ac7e | refs/heads/master | 2021-01-12T19:51:15.307701 | 2016-09-10T22:53:09 | 2016-09-10T22:53:09 | 66,781,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,040 | py | from __future__ import unicode_literals
from django.db import models
class Rating(models.Model):
system_choice = models.CharField(max_length=20, unique=False)
selected_1 = models.IntegerField(max_length=1, default=0)
selected_2 = models.IntegerField(max_length=1, default=0)
selected_3 = models.IntegerField(max_length=1, default=0)
selected_4 = models.IntegerField(max_length=1, default=0)
selected_5 = models.IntegerField(max_length=1, default=0)
def save(self, *args, **kwargs):
super(Rating, self).save(*args, **kwargs)
def __unicode__(self):
return self.system_choice
class UserImage(models.Model):
img = models.ImageField(upload_to='user_images')
tag = models.CharField(max_length=100)
img_name = models.CharField(max_length=50)
def save(self, *args, **kwargs):
self.img_name = self.img.name
self.tag = str(self.tag).lower().strip()
super(UserImage, self).save(*args, **kwargs)
def __unicode__(self):
return self.img.name
| [
"blaircalderwood@live.co.uk"
] | blaircalderwood@live.co.uk |
fd4ad87877eb340cfaf846cb264685b72c187840 | 412bab421bf334f7a4195751a36cd1b267a49794 | /clustering/kmeans.py | e6fd0dc13c32fe874506df6312b6a34e131e8fa7 | [] | no_license | wei-group/basic-algorithms | e6f58763c2b92b140e367763f9e97a18d72b51c3 | e428d4526af2dfb65db5745f0bb5eedb562e5029 | refs/heads/master | 2020-07-02T14:37:02.814673 | 2019-08-10T01:12:03 | 2019-08-10T01:12:03 | 201,559,397 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,465 | py | # 2019-08-07
# author: Ryb
# 尝试自己实现kmeans聚类算法
# kmeans算法
# 1. 初始化k个族中心
# 2. 计算每个样本与k个族中心距离,标记样本为距离最短的族
# 3. 重新确定K族中心(族中平均位置)
# 4. 循环2-3,直到前后两次所有族中心距离变化<eps
"""
# 输入:
X: 2d数组, 形如(n_samples, m_features),n_samples表示样本数,m_features表示特征维数
K: int, 超参数,指定族格式
metric:str, 距离类型,默认为欧式距离'Euler',其他暂时为实现
eps: float, 精度(当族中心位置更新变化<eps时停止)
random_state: 随机种子
# 输出:
centers: K族中心向量, 2d数组, 形如(K, m_features)
pred: 1-d数组,长度为n_samples
"""
import numpy as np
import random # 用python的random模块,不用numpy的random
class kmeans: # 创建kmeans类
# 初始化函数
def __init__(self, X=None, K=2, metric='Euler', eps=1e-6, init_centers=None, random_state=None):
self.X = X
self.K = K
self.metric = metric
self.eps = eps
self.centers = init_centers
self.random_state = random_state
# if not self.centers and not self.X:
# if random_state is not None:
# random.seed(random_state)
# idx = random.sample(range(self.X.shape[0]), self.K)
# self.centers = self.X[idx,:]
# 距离函数
def calc_dist(self, x, c):
"""
# 如果主样本与单中心计算欧式距离,返回 np.sqrt(np.power(x-c,2)).sum() 即可;
# 考虑到扩展其他距离计算方式,采用用闵可夫斯基距离,当lp=2时候即为欧式距离
# 单样本-单中心的距离计算,返回dist.sum()
# 单样本-多中心的距离计算,返回dist.sum(axis=1)
"""
if self.metric=='Euler':
lp = 2
dist = np.power(np.power(x-c,lp), 1/lp)
if len(dist.shape)==1:
return dist.sum() # 单样本,单中心
else:
return dist.sum(axis=1) # 单样本,多中心
# 迭代(训练)
def fit(self, X):
# 样本
if X is not None:
self.X = X
# 样本形状
n_samples, n_features = self.X.shape
# 设置随机种子
if self.random_state is not None:
random.seed(self.random_state)
# 初始化聚类中心
if self.centers is None:
"""
# idx = np.random.randint(low=0, hight=n_sample,size=self.K)
# 用randint初始化,有重复;重复的族中心,会导致族中分配不到成员,求均值NaN
# 更新的族中心后,中心向量NaN
#
"""
idx = idx = random.sample(range(n_samples), self.K)
self.centers = X[idx,:]
# 初始样本的族标记-1
pred = np.array([-1]*n_samples)
iter = 0
stop = False # 结束标志
while (not stop):
iter +=1
print(iter)
# 遍历所有样本,划分族
# for i in range(n_samples):
# min_dist = np.inf
# c = -1
# # 遍历所有族中心向量
# for k in range(self.K):
# dist = self.calc_dist(X[i,:], self.centers[k,:])
# if dist < min_dist:
# min_dist = dist
# c = k
# pred[i] =c
for i in range(n_samples):
dists = self.calc_dist(X[i,:], self.centers)
pred[i] = np.argmin(dists)
# 重新确定族中心
new_centers = np.zeros((self.K, n_features))
for k in range(self.K):
new_centers[k,:] = X[pred==k,:].mean(axis=0)
# 判断停止条件
delta = abs(new_centers - self.centers)
flg = delta <self.eps
stop = flg.all()
self.centers = new_centers
return pred, self.centers
# 族预测
def predict(self, X):
# 遍历所有样本,划分族
pred = np.array([-1]*n_samples)
for i in range(n_samples):
dists = self.calc_dist(X[i,:], self.centers)
pred[i] = np.argmin(dists)
return pred
if __name__ == "__main__":
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
# 生成数据
n_samples = 1500
random_state = 170
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
# 调用kmeans
model = kmeans(K=3, eps=1e-3, random_state=1)
pred, centers = model.fit(X)
n_samples, _ = X.shape
# 族预测,如果仅是训练数据,直接用fit(X)返回的族划分
# pred = model.predict(X)
plt.scatter(X[:, 0], X[:, 1], c=pred)
plt.title("kmeans")
plt.show()
| [
"noreply@github.com"
] | wei-group.noreply@github.com |
32b5d6deefadeeec4ae718e1911b9a8d8c500d12 | 0fcf8bae20849bdc4514a94c672ac79ecfff670b | /venv_home/bin/rst2latex.py | 87b9cb0f63aa97a0098bca4be097099581bb1097 | [] | no_license | CurryXuGoGo/myblog | 5322e15bd480bfab521b19558e9c6b46c65f6742 | 26fbcc436a3d20598ea1922756dd69664fb3453f | refs/heads/master | 2021-01-19T17:26:25.130493 | 2017-02-22T06:01:04 | 2017-02-22T06:01:04 | 82,458,228 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 829 | py | #!/home/curry/myproject/curry_home/venv_home/bin/python2
# $Id: rst2latex.py 5905 2009-04-16 12:04:49Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing LaTeX.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline
description = ('Generates LaTeX documents from standalone reStructuredText '
'sources. '
'Reads from <source> (default is stdin) and writes to '
'<destination> (default is stdout). See '
'<http://docutils.sourceforge.net/docs/user/latex.html> for '
'the full reference.')
publish_cmdline(writer_name='latex', description=description)
| [
"canyuxu@163.com"
] | canyuxu@163.com |
817855407a326bbddc2d810692e4fdf3e51286f1 | 30f89cd54ed3ad864b0a090a7ee5fb006fd22015 | /posts/router.py | b93fe38b4f86e690adb68df0d1d9ef4da18e13d0 | [] | no_license | 5hy4m/SimpleBolg-api | 888f51d7c4c3db0758413c00005e393303b582fc | 5597e91d43bf0c72f809443773a0b9f99385c872 | refs/heads/master | 2022-04-17T10:45:32.906898 | 2020-04-11T13:01:33 | 2020-04-11T13:01:33 | 254,868,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 604 | py | from .views import *
from django.urls import path
from rest_framework import routers
from rest_framework.urlpatterns import format_suffix_patterns
router = routers.DefaultRouter()
router.register('register',CreateUserViewset)
router.register('posts',PostsViewset)
router.register('likes',LikeViewset)
router.register('comments',CommentViewset)
router.register('replies',ReplyViewset)
urlpatterns = [
path('get-users/', UsersView.as_view(), name='get_users'),
path('logout/', Logout.as_view(), name='logout'),
]
urlpatterns = format_suffix_patterns(urlpatterns)
urlpatterns += router.urls
| [
"vcsshyam1998@gmail.com"
] | vcsshyam1998@gmail.com |
6a0b21840339c3f21e8500025fb762066f982809 | 82baa0aa95c3d616299ec64aaa99b2efe973b9fe | /lists/views.py | c732cf295652c2b27fde7f26d400e3c2f5dd1f8f | [] | no_license | adamatus/goat-goat-goat | 09bbea320f926e30f46278d7cae348015606ef68 | 6934323df9b91be9010dfa02dfa90f88275d4986 | refs/heads/master | 2021-01-21T12:43:32.037662 | 2015-06-30T20:40:29 | 2015-06-30T20:40:29 | 38,329,756 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 723 | py | from django.http import HttpResponse
from django.shortcuts import render, redirect
from lists.models import Item, List
def home_page(request):
return render(request, 'home.html')
def view_list(request, list_id):
list_ = List.objects.get(id=list_id)
items = Item.objects.filter(list=list_)
return render(request, 'list.html', {'list': list_})
def new_list(request):
list_ = List.objects.create()
Item.objects.create(text=request.POST['item_text'], list=list_)
return redirect('/lists/%d/' % (list_.id))
def add_item(request, list_id):
list_ = List.objects.get(id=list_id)
Item.objects.create(text=request.POST['item_text'], list=list_)
return redirect('/lists/%d/' % (list_.id))
| [
"Adam.Riggall@nike.com"
] | Adam.Riggall@nike.com |
c01d94ae1e7fc315ee743584b9c872e675ccc16f | 00e58944bb5e1f14e427887abffe9f8a7f63da0b | /Неделя 5/больше_предыдущего.py | 5d3ddde4ed119790635708b4ae63c9973c94202d | [] | no_license | homosociologicus/coursera_py_basics | 323367e0097a06491030080992c06d6565230568 | 795965b83b509697d7e74cb701aba92f3fb13de6 | refs/heads/master | 2022-11-21T19:53:06.665399 | 2020-07-26T20:55:01 | 2020-07-26T20:55:01 | 262,435,643 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | nums = list(map(int, input().split()))
for i, num in enumerate(nums):
if i > 0 and num > nums[i - 1]:
print(num, end=' ')
| [
"56775261+homosociologicus@users.noreply.github.com"
] | 56775261+homosociologicus@users.noreply.github.com |
cb6b6633f9e9bcb3190db064952f56f28a09c73e | fc5932adb6ebf234542ba66772f9bb1594cdc587 | /first_project/first_app/migrations/0017_auto_20210423_2007.py | 806f9471b6251dd2e6be858ed032394dec380c17 | [] | no_license | sagunrupakheti/Online-Examination-System-for-User-Reliability-and-Secure-Authentication | bda7dd907229cf583b0b689b57c1b09f07fce1c4 | dbe758d6ba23a79999f5e640546eb154bdf0686e | refs/heads/master | 2023-06-19T10:31:42.941190 | 2021-07-12T15:28:23 | 2021-07-12T15:28:23 | 385,242,910 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,061 | py | # Generated by Django 3.1.6 on 2021-04-23 14:22
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('first_app', '0016_auto_20210423_2006'),
]
operations = [
migrations.AlterField(
model_name='examination',
name='exactTimeStart',
field=models.TimeField(default=datetime.time(20, 7, 36, 479832)),
),
migrations.CreateModel(
name='FinalResult',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, unique=True)),
('total_marks_obtained', models.CharField(max_length=10)),
('grade', models.CharField(max_length=5)),
('exam_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='first_app.examination')),
('student_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='first_app.userprofileinfo')),
],
),
]
| [
"sagunrupakheti@gmail.com"
] | sagunrupakheti@gmail.com |
e4170ac5c4fafae22d46de35b0b75f1e199cfcb7 | e613aad81f98a23e5245e3bd15c6401a86038bec | /cupon/migrations/0003_auto__add_field_promocion_descripcion.py | 43983366654e696bdecbd623b2671bed219bbf09 | [] | no_license | zykorwx/cupones | 9fc7a84d25bfc9fd187983e01623bee1ac9204c9 | d4145feb77506c6a318a07037a15aa1512bd55ce | refs/heads/master | 2020-05-09T15:57:28.419318 | 2013-05-20T13:55:25 | 2013-05-20T13:55:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,582 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Promocion.descripcion'
db.add_column(u'cupon_promocion', 'descripcion',
self.gf('django.db.models.fields.CharField')(default='hola', max_length=200),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Promocion.descripcion'
db.delete_column(u'cupon_promocion', 'descripcion')
models = {
u'cupon.cupon': {
'Meta': {'object_name': 'Cupon'},
'fecha_creacion': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'id_promocion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cupon.Promocion']"}),
'num_cupon': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
u'cupon.promocion': {
'Meta': {'object_name': 'Promocion'},
'descripcion': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'estado': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '1'}),
'fecha_creacion': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'fecha_publicacion': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'fecha_termino': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'id_empresa': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['empresa.Empresa']"}),
'imagen': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'num_limite': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'empresa.empresa': {
'Meta': {'object_name': 'Empresa'},
'calle': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'colonia': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'estado': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'giro': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'municipio': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'nombre': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'nombre_encargado': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'num_exterior': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'num_interior': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'pagina_web': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'telefono': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['cupon'] | [
"enrique.wx@gmail.com"
] | enrique.wx@gmail.com |
533ac91d6a3a858bfbdb52ce3fa9904106f64220 | c07ec6da7992a50ed819b4fe4d8a37d275828e98 | /divisibleSumPairs.py | 534c1b40bc5d6d264a65c12e80da2dac20851c7b | [] | no_license | basilvetas/HackerRank | 03f20c3a55d52d996d90dd3b43ef21e9e2d57461 | a24787631354d2ba076f841e6df43f0268f0d34a | refs/heads/master | 2020-12-02T23:01:06.674777 | 2017-08-21T15:16:46 | 2017-08-21T15:16:46 | 96,218,039 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | #!/bin/python3
import sys
def divisibleSumPairs(n, k, ar):
# Complete this function
count = 0
for i in range(n-1):
for j in range(i+1, n):
if((ar[i] + ar[j]) % k == 0):
count += 1
return count
n, k = input().strip().split(' ')
n, k = [int(n), int(k)]
ar = list(map(int, input().strip().split(' ')))
result = divisibleSumPairs(n, k, ar)
print(result) | [
"basilvetas@gmail.com"
] | basilvetas@gmail.com |
38115f76d54bbe26259ce8a200bc6f270fa0d7fd | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03773/s566332295.py | 1593f7c298c88100c0c942d38fec3f2b77e644d8 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 86 | py | def main():
num = list(map(int,input().split()))
print((num[0]+num[1])%24)
main() | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
e93f57f991b6c6ff7bd97221fd2e2ced2050d5c8 | d0a042a6518d66d092796212fea6507c879c00c2 | /PIAIC121874_Assignment2.py | 96fc7a6048d823ff9cb7b824e21bce47a9a5f5e5 | [] | no_license | fizazafar/PIAIC-AIC-Batch-13 | 5c0a813ce87c7a8405e514932cefa7234d6c631d | 3de1862f5a00808f4145ea5e7b4f63ed8dfa4a1c | refs/heads/main | 2023-02-11T21:43:25.561517 | 2021-01-03T17:07:11 | 2021-01-03T17:07:11 | 326,457,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,615 | py | #Fiza Zafar PIAIC121874
# %load Assignment2.py
# Read Instructions carefully before attempting this assignment
# 1) don't rename any function name
# 2) don't rename any variable name
# 3) don't remove any #comment
# 4) don't remove """ under triple quate values """
# 5) you have to write code where you found "write your code here"
# 6) after download rename this file with this format "PIAICCompletRollNumber_AssignmentNo.py"
# Example piaic17896_Assignment1.py
# 7) After complete this assignment please push on your own GitHub repository.
# 8) you can submit this assignment through the google form
# 9) copy this file absolute URL then paste in the google form
# The example above: https://github.com/EnggQasim/Batch04_to_35/blob/main/Sunday/1_30%20to%203_30/Assignments/assignment1.txt
# * Because all assignment we will be checked through software if you missed any above points
# * then we can't assign your scores in our database.
import numpy as np
# Task1
def function1():
# create 2d array from 1,12 range
# dimension should be 6row 2 columns
# and assign this array values in x values in x variable
# Hint: you can use arange and reshape numpy methods
x = np.arange(1,13).reshape((6,2))
return x
"""
expected output:
[[ 1 2]
[ 3 4]
[ 5 6]
[ 7 8]
[ 9 10]
[11 12]]
"""
# Task2
def function2():
#create 3D array (3,3,3)
#must data type should have float64
#array value should be satart from 10 and end with 36 (both included)
# Hint: dtype, reshape
x = np.arange(10,37,dtype=np.float64).reshape((3,3,3)) #wrtie your code here
return x
"""
Expected: out put
array([[[10., 11., 12.],
[13., 14., 15.],
[16., 17., 18.]],
[[19., 20., 21.],
[22., 23., 24.],
[25., 26., 27.]],
[[28., 29., 30.],
[31., 32., 33.],
[34., 35., 36.]]])
"""
#Task3
def function3():
#extract those numbers from given array. those are must exist in 5,7 Table
#example [35,70,105,..]
a = np.arange(1, 100*10+1).reshape((100,10))
x = a[(a % 5 == 0) & (a % 7 == 0)] #wrtie your code here
return x
"""
Expected Output:
[35, 70, 105, 140, 175, 210, 245, 280, 315, 350, 385, 420, 455,
490, 525, 560, 595, 630, 665, 700, 735, 770, 805, 840, 875, 910,
945, 980]
"""
#Task4
def function4():
#Swap columns 1 and 2 in the array arr.
arr = np.arange(9).reshape(3,3)
return arr[:,[1,0,2]] #wrtie your code here
"""
Expected Output:
array([[1, 0, 2],
[4, 3, 5],
[7, 6, 8]])
"""
#Task5
def function5():
#Create a null vector of size 20 with 4 rows and 5 columns with numpy function
z = np.zeros((4,5)) #wrtie your code here
return z
"""
Expected Output:
array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]])
"""
#Task6
def function6():
# Create a null vector of size 10 but the fifth and eighth value which is 10,20 respectively
arr = np.zeros(10);arr[4]=10;arr[7]=20 #wrtie your code here
return arr
#Task7
def function7():
# Create an array of zeros with the same shape and type as X. Dont use reshape method
x = np.arange(4, dtype=np.int64)
return np.zeros_like(x) #write your code here
"""
Expected Output:
array([0, 0, 0, 0], dtype=int64)
"""
#Task8
def function8():
# Create a new array of 2x5 uints, filled with 6.
x = np.full((2,5),6,dtype=np.uint32) #write your code here
return x
"""
Expected Output:
array([[6, 6, 6, 6, 6],
[6, 6, 6, 6, 6]], dtype=uint32)
"""
#Task9
def function9():
# Create an array of 2, 4, 6, 8, ..., 100.
a = np.arange(2,101,2) # write your code here
return a
"""
Expected Output:
array([ 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26,
28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52,
54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78,
80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 100])
"""
#Task10
def function10():
# Subtract the 1d array brr from the 2d array arr, such that each item of brr subtracts from respective row of arr.
arr = np.array([[3,3,3],[4,4,4],[5,5,5]])
brr = np.array([1,2,3])
subt = arr-brr[:,None] # write your code here
return subt
"""
Expected Output:
array([[2 2 2]
[2 2 2]
[2 2 2]])
"""
#Task11
def function11():
# Replace all odd numbers in arr with -1 without changing arr.
arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
ans = np.where((arr%2==1), -1, arr) #write your code here
return ans
"""
Expected Output:
array([ 0, -1, 2, -1, 4, -1, 6, -1, 8, -1])
"""
#Task12
def function12():
# Create the following pattern without hardcoding. Use only numpy functions and the below input array arr.
# HINT: use stacking concept
arr = np.array([1,2,3])
ans = np.hstack((np.repeat(arr,3),arr,arr,arr)) #write your code here
return ans
"""
Expected Output:
array([1, 1, 1, 2, 2, 2, 3, 3, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3])
"""
#Task13
def function13():
# Set a condition which gets all items between 5 and 10 from arr.
arr = np.array([2, 6, 1, 9, 10, 3, 27])
ans = arr[np.all([arr>5,arr<10], axis= 0)] #write your code here
return ans
"""
Expected Output:
array([6, 9])
"""
#Task14
def function14():
# Create an 8X3 integer array from a range between 10 to 34 such that the difference between each element is 1 and then Split the array into four equal-sized sub-arrays.
# Hint use split method
arr = np.arange(10, 34, 1) #write reshape code
ans = np.split(arr, 4) #write your code here
return ans
"""
Expected Output:
[array([[10, 11, 12],[13, 14, 15]]),
array([[16, 17, 18],[19, 20, 21]]),
array([[22, 23, 24],[25, 26, 27]]),
array([[28, 29, 30],[31, 32, 33]])]
"""
#Task15
def function15():
#Sort following NumPy array by the second column
arr = np.array([[ 8, 2, -2],[-4, 1, 7],[ 6, 3, 9]])
ans = arr[np.argsort(arr[:, 1])] #write your code here
return ans
"""
Expected Output:
array([[-4, 1, 7],
[ 8, 2, -2],
[ 6, 3, 9]])
"""
#Task16
def function16():
#Write a NumPy program to join a sequence of arrays along depth.
x = np.array([[1], [2], [3]])
y = np.array([[2], [3], [4]])
ans = np.dstack((x,y)) #write your code here
return ans
"""
Expected Output:
[[[1 2]]
[[2 3]]
[[3 4]]]
"""
#Task17
def function17():
# replace numbers with "YES" if it divided by 3 and 5
# otherwise it will be replaced with "NO"
# Hint: np.where
arr = np.arange(1,10*10+1).reshape((10,10))
return np.where([(arr%3 == 0) & (arr%5 == 0)], "YES", "NO") # Write Your Code HERE
#Excpected Out
"""
array([['NO', 'NO', 'NO', 'NO', 'NO', 'NO', 'NO', 'NO', 'NO', 'NO'],
['NO', 'NO', 'NO', 'NO', 'YES', 'NO', 'NO', 'NO', 'NO', 'NO'],
['NO', 'NO', 'NO', 'NO', 'NO', 'NO', 'NO', 'NO', 'NO', 'YES'],
['NO', 'NO', 'NO', 'NO', 'NO', 'NO', 'NO', 'NO', 'NO', 'NO'],
['NO', 'NO', 'NO', 'NO', 'YES', 'NO', 'NO', 'NO', 'NO', 'NO'],
['NO', 'NO', 'NO', 'NO', 'NO', 'NO', 'NO', 'NO', 'NO', 'YES'],
['NO', 'NO', 'NO', 'NO', 'NO', 'NO', 'NO', 'NO', 'NO', 'NO'],
['NO', 'NO', 'NO', 'NO', 'YES', 'NO', 'NO', 'NO', 'NO', 'NO'],
['NO', 'NO', 'NO', 'NO', 'NO', 'NO', 'NO', 'NO', 'NO', 'YES'],
['NO', 'NO', 'NO', 'NO', 'NO', 'NO', 'NO', 'NO', 'NO', 'NO']],
dtype='<U3')
"""
#Task18
def function18():
# count values of "students" are exist in "piaic"
piaic = np.arange(100)
students = np.array([5,20,50,200,301,7001])
x = len(set(piaic)&set(students)) # Write you code Here
return x
#Expected output: 3
# Task19
def function19():
#Create variable "X" from 1,25 (both are included) range values
#Convert "X" variable dimension into 5 rows and 5 columns
#Create one more variable "W" copy of "X"
#Swap "W" row and column axis (like transpose)
# then create variable "b" with value equal to 5
# Now return output as "(X*W)+b:
X = np.arange(1,26).reshape(5,5) # Write your code here
W = X.T # Write your code here
b = 5 # Write your code here
output = X*W+b # Write your code here
return output
#expected output
"""
array([[ 6, 17, 38, 69, 110],
[ 17, 54, 101, 158, 225],
[ 38, 101, 174, 257, 350],
[ 69, 158, 257, 366, 485],
[110, 225, 350, 485, 630]])
"""
#Task20
def function20():
    # Apply the transformation "abc" (double, add one) to every value of "x".
    x = np.arange(1, 11)
    def abc(values):
        doubled = values * 2
        return doubled + 3 - 2
    return np.abs(abc(x))
#Expected Output: array([ 3,  5,  7,  9, 11, 13, 15, 17, 19, 21])
#--------------------------X-----------------------------X-----------------------------X----------------------------X---------------------
| [
"noreply@github.com"
] | fizazafar.noreply@github.com |
7901ff4f344409cdb21e823edb41a0cfe4a66295 | 5acc70e767c16531f99becf878c645c24d24b09c | /src/location/location.py | 2244c345e4a56e95ab81e4ae23a325a3f3599f12 | [] | no_license | mwess/MailClassify | acd45abf2cb35f63494bdc1ade1ff389b898a91c | c98afdd45dd9368d8e7806bef228de14fcc88cc3 | refs/heads/master | 2020-04-25T09:33:36.991589 | 2019-03-05T16:31:33 | 2019-03-05T16:31:33 | 172,679,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,671 | py | """
This module is concered with the extraction of location information from strings.
"""
import re
from nltk import word_tokenize
import src.utils.preprocessing as pp
from src.utils.processpipeline import ProcessPipeline
from src.utils.mails import transform_mail
# List of locations for which we accept projects.
# NOTE: the list deliberately mixes cased names, a known misspelling, and the
# lowercase/preprocessed variants that come out of the city filter pipeline,
# so matching works both before and after normalisation.
list_of_acceptable_locations = [
    'Frankfurt',
    'Frannkfurt', # Typo
    'Wiesbaden',
    'Darmstadt',
    'Mainz',
    'Neu Isenburg',
    'Neu-Isenburg',
    'Eschborn',
    'Bad Vilbel',
    'Bad Homburg',
    # Abbreviations
    'FFM',
    # General area descriptions
    'Rhein-Main',
    'Rhein Main',
    'Rhein',
    'Main', # Beware english mails.
    'Homburg',
    'Vilbel',
    'Isenburg',
    'ffm',
    'rhein-main',
    'rhein main',
    'rhein',
    'rhein-main-gebiet',
    'frankfurt/main',
]
# Ordered normalisation pipeline applied to raw city names (see load_city_list):
# lowercase, strip bracketed suffixes, fold umlauts, drop German prefix/connector
# words ("Bad", "am", "im", ...), escape dots, and trim whitespace.
city_filters = [
    pp.lowercase_chars,
    pp.remove_braced_information,
    pp.transform_umlaute,
    pp.remove_BAD_prefix,
    pp.remove_AM_connector,
    pp.remove_IM_connector,
    pp.remove_AN_DER_connector,
    pp.remove_IN_DER_connector,
    pp.remove_IN_connector,
    pp.remove_OB_DER_connector,
    pp.remove_BEI_connector,
    pp.remove_VOR_DER_connector,
    pp.escapes_dots,
    pp.strip,
]
def add_custom_keywords(cities):
    """Return *cities* extended with region-style search keywords
    (abbreviations and Rhine-Main area descriptions)."""
    extra = [
        'ffm',
        'rhein-main',
        'rhein main',
        'rhein',
        'rhein-main-gebiet',
        'frankfurt/main',
    ]
    return list(cities) + extra
def remove_custom_keywords(cities):
    """Drop city names whose everyday German meaning is too ambiguous.

    Words such as 'weil' (because) or 'senden' (to send) are real place
    names but would match ordinary prose far too often.

    :param cities: iterable of lowercase city names
    :return: list with the ambiguous names filtered out
    """
    ambiguous = frozenset(['weil', 'waren', 'lage', 'senden', 'wissen'])
    kept = []
    for city in cities:
        if city not in ambiguous:
            kept.append(city)
    return kept
def add_foreign_cities():
    """Return non-German cities (in both local and English spelling)
    that should still be recognised as acceptable locations."""
    zurich_variants = ['Zürich', 'Zurich']
    vienna_variants = ['Wien', 'Vienna']
    return zurich_variants + vienna_variants
def load_city_list(fname, preprossing=True):
    """Read one city name per line from *fname*, append the foreign cities,
    optionally normalise everything through the city filter pipeline, then
    add the custom region keywords and drop the ambiguous names.

    :param fname: path to the newline-separated city list
    :param preprossing: when True, run each name through ``city_filters``
    :return: list of (optionally normalised) city names / keywords
    """
    pipeline = ProcessPipeline(city_filters)
    with open(fname) as handle:
        cities = handle.readlines()
    cities = cities + add_foreign_cities()
    if preprossing:
        cities = [pipeline.execute(city) for city in cities]
    cities = add_custom_keywords(cities)
    return remove_custom_keywords(cities)
def extract_location_names(message, city_list):
    """Return every token of *message* that also appears in *city_list*.

    Both inputs are expected to be preprocessed already (lowercased,
    umlauts transformed).
    """
    tokens = word_tokenize(message, language='german')
    return [token for token in tokens if token in city_list]
class LocationExtraction:
    """Extracts acceptable city names and postal-region patterns from mail text."""
    def __init__(self, acceptable_cities):
        """:param acceptable_cities: preprocessed (lowercase) city names to match"""
        self._cities = acceptable_cities
        self._location_filters = []
        # Lookahead-wrapped boundary pattern: each city must be preceded and
        # followed by punctuation/whitespace or the string edge, and the
        # lookahead lets overlapping matches separated by one delimiter
        # still all be found.
        self._regex_prefix = r'(?=(?:^|[\s.,!?;:]){1}?'
        self._regex_suffix = r'(?:[\s.,!?;:]|$){1}?)'
        self._location_expression = ''
        self._setup_filters()
        self._build_location_regex()
    def _setup_filters(self):
        # Preprocessing steps applied to a mail body before scanning it.
        self._location_filters = [
            pp.remove_pentasys_header_for_location,
            pp.reduce_http,
            pp.replace_punctuations,
            pp.transform_umlaute,
            pp.remove_non_ascii,
            pp.lowercase_chars,
            pp.replace_trailing_dashes,
            pp.remove_arithmetic_symbols,
            pp.remove_braces,
            pp.filter_main_body,
            pp.reduce_whitespaces,
        ]
        # Same pipeline minus trailing-dash replacement.
        # NOTE(review): _region_filters is never referenced in this module —
        # possibly intended for extract_region_patterns; verify before removing.
        self._region_filters = [
            pp.remove_pentasys_header_for_location,
            pp.reduce_http,
            pp.replace_punctuations,
            pp.transform_umlaute,
            pp.remove_non_ascii,
            pp.lowercase_chars,
            pp.remove_arithmetic_symbols,
            pp.remove_braces,
            pp.filter_main_body,
            pp.reduce_whitespaces,
        ]
    def extract_cities(self, message):
        """Return every acceptable city name found in the preprocessed *message*."""
        locations = re.findall(self._location_expression, message)
        return locations
    def _build_location_regex(self):
        # One alternation over all acceptable cities, wrapped in the
        # boundary lookahead built in __init__.
        self._location_expression = self._regex_prefix + '(' + '|'.join(self._cities) + ')' + self._regex_suffix
    def extract_locations(self, mail):
        """Preprocess *mail* and return the deduplicated list of city names
        and postal-region patterns found in it."""
        message = transform_mail(mail, self._location_filters)
        locations = self.extract_cities(message)
        regions = self.extract_region_patterns(message)
        return list(set(locations + regions))
    def extract_region_patterns(self, message):
        """Find postal-region references such as 'D60311' or 'plz 60311'."""
        region_pattern_d = r'(?=([Dd]{1}[0-9]{1,5}([^.]|$)))'
        matches = re.finditer(region_pattern_d, message)
        found_regions = [match.group(1).strip() for match in matches]
        region_pattern_plz = r'(?=(plz\s*[0-9]{1,5}([^.]|$)))'
        matches = re.finditer(region_pattern_plz, message)
        found_regions += [match.group(1).strip() for match in matches]
        return found_regions
    @classmethod
    def from_file(cls, fname):
        """Build an extractor from a newline-separated city list file."""
        cities = load_city_list(fname)
        return cls(cities)
    @classmethod
    def load_from_default_file(cls):
        """Build an extractor from the bundled German city list."""
        default_file = 'models/location/german_city_tree.txt'
        return LocationExtraction.from_file(default_file)
    @staticmethod
    def extract_all_locations(mail):
        """Convenience wrapper: default extractor + extract_locations(mail)."""
        extractor = LocationExtraction.load_from_default_file()
        return extractor.extract_locations(mail)
    @staticmethod
    def contains_acceptable_locations(locations):
        """Return True when any of *locations* (lowercased comparison) is in
        the module-level list of acceptable project locations."""
        acceptable_locations = list(map(lambda x: x.lower(), list_of_acceptable_locations))
        if any(map(lambda location: location in acceptable_locations, locations)):
            return True
        else:
            return False
| [
"maximilian.wess@detim.de"
] | maximilian.wess@detim.de |
96dcb8ab00d66cbd158cc145f3e2edcf4646797a | 99e05a85f22f7cf192aab03e103a94df7652ec94 | /FlaskExercise/models.py | 45ba9993ac808b490ded4690f733e58798404e47 | [
"Apache-2.0"
] | permissive | saidulislam/azure-storage-webapp-zoo | cb72ae0107ca592df52b962520620ac41a618c43 | db274b3927ca652ce3df5352c416998180f2301a | refs/heads/main | 2023-06-04T19:40:38.739137 | 2021-06-29T04:19:03 | 2021-06-29T04:19:03 | 381,226,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,540 | py | from FlaskExercise import app, db
from flask import flash
from werkzeug.utils import secure_filename
from azure.storage.blob import BlobServiceClient
import uuid
# Azure Blob Storage configuration: container name, account endpoint URL,
# and an authenticated service client shared by all models in this module.
blob_container = app.config['BLOB_CONTAINER']
storage_url = "https://{}.blob.core.windows.net/".format(app.config['BLOB_ACCOUNT'])
blob_service = BlobServiceClient(account_url=storage_url, credential=app.config['BLOB_STORAGE_KEY'])
class Animal(db.Model):
    """A zoo animal with an optional image stored in Azure Blob Storage."""
    __tablename__ = 'animals'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(75))
    scientific_name = db.Column(db.String(75))
    description = db.Column(db.String(800))
    image_path = db.Column(db.String(100))  # blob name of the uploaded image

    def __repr__(self):
        # BUG FIX: the model has no 'body' attribute, so the old
        # '<Animal {}>'.format(self.body) raised AttributeError.
        return '<Animal {}>'.format(self.name)

    def save_changes(self, file):
        """Commit pending changes; if *file* is given, upload it to blob
        storage under a random name and delete the previously stored image.

        :param file: werkzeug FileStorage, or falsy when no new image
        """
        if file:
            filename = secure_filename(file.filename)
            fileExtension = filename.rsplit('.', 1)[1]
            randomFilename = str(uuid.uuid1())
            filename = randomFilename + '.' + fileExtension
            try:
                blob_client = blob_service.get_blob_client(container=blob_container, blob=filename)
                blob_client.upload_blob(file)
                if self.image_path:  # Get rid of old image, since it's replaced
                    blob_client = blob_service.get_blob_client(container=blob_container, blob=self.image_path)
                    blob_client.delete_blob()
                # BUG FIX: only point the record at the new blob once the
                # upload succeeded; previously image_path was set even when
                # the upload raised, leaving a dangling reference.
                self.image_path = filename
            except Exception as err:
                flash(str(err))
        db.session.commit()
| [
"noreply@github.com"
] | saidulislam.noreply@github.com |
574996d91f07ac782294a67386dd4d525ab5f8df | d13303fb718bf02f61a6141ed0cb48294f76175c | /dev/lambda/lambda_function_sr.py | 436156fcca1837c83fae8ab5730345fcc03ccaaa | [
"Apache-2.0"
] | permissive | sdrohrer/lexflex | 8e8ae462d520a2a6911debebcc3dbd235ba47e92 | 87bf528a2a6798e595ac30fe709b2d01d6674296 | refs/heads/master | 2021-09-01T12:04:02.748984 | 2017-12-26T22:05:23 | 2017-12-26T22:05:23 | 114,684,698 | 0 | 1 | Apache-2.0 | 2017-12-23T19:25:31 | 2017-12-18T20:29:00 | Python | UTF-8 | Python | false | false | 1,192 | py | ########################################
#this is the sample python 3.6 lambda function blueprint code
#I called mine steveohello
#to execute this in the Lambda Management Console you must save the code and then configure the basic test event and then TEST
#This tutorial https://www.youtube.com/watch?v=hzlxWBs1Qt4 covers it (just first 8 minutes)
########################################
import json
# you import the json package to use this
print('Loading function')
#lambda takes a few inputs, so far I only understand event as the trigger
#not sure about context or callback which is not mentioned yet
def lambda_handler(event, context):
    """Log the three demo keys and echo back the value of 'key1'.

    The print output only shows up in the CloudWatch log; the return
    value is what the Lambda invocation renders.
    """
    for index in (1, 2, 3):
        key = 'key' + str(index)
        print("value" + str(index) + " = " + event[key])
    return event['key1']  # Echo back the first key value
#ignore this raise command for now
##raise Exception('Something went wrong')
| [
"noreply@github.com"
] | sdrohrer.noreply@github.com |
1f963650bcb5eb70de1917b67c2e511010975132 | e8bb51626c32d173cf248e5c47d09eb50d943a31 | /Programmes/PYTHON/TP1/Ui_screenUI.py | c2d9d2c835c83221ab9199d3cd263f27f9007fb2 | [] | no_license | juantorres9/Presentation | dde458ceaa428c372633afba9e84e00b3f0bdc9d | a86badfd2dc12785058ddfe0660d48e7ce42bcc4 | refs/heads/master | 2021-01-12T03:17:52.341247 | 2017-01-29T23:58:52 | 2017-01-29T23:58:52 | 78,183,846 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,720 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:\Users\juantorres\TOTAL\PYTHON\TP1\screenUI.ui'
#
# Created by: PyQt5 UI code generator 5.5
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
    """Auto-generated (pyuic5) layout class for the dialog.

    Prefer editing the .ui file and regenerating — manual changes here are
    lost on the next pyuic5 run (see the header warning).
    """
    def setupUi(self, Dialog):
        # Build the widget tree: an OK/Cancel button box, a list view,
        # and a push button, all with fixed geometry.
        Dialog.setObjectName("Dialog")
        Dialog.resize(400, 300)
        self.buttonBox = QtWidgets.QDialogButtonBox(Dialog)
        self.buttonBox.setGeometry(QtCore.QRect(30, 240, 341, 32))
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName("buttonBox")
        self.listView = QtWidgets.QListView(Dialog)
        self.listView.setGeometry(QtCore.QRect(70, 10, 256, 192))
        self.listView.setObjectName("listView")
        self.pushButton = QtWidgets.QPushButton(Dialog)
        self.pushButton.setGeometry(QtCore.QRect(100, 230, 75, 23))
        self.pushButton.setObjectName("pushButton")
        self.retranslateUi(Dialog)
        # Wire the standard accept/reject signals to the dialog slots.
        self.buttonBox.accepted.connect(Dialog.accept)
        self.buttonBox.rejected.connect(Dialog.reject)
        QtCore.QMetaObject.connectSlotsByName(Dialog)
    def retranslateUi(self, Dialog):
        # Apply translatable UI strings (Qt Linguist hook).
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
        self.pushButton.setText(_translate("Dialog", "PushButton"))
# Standalone preview: build the dialog and run the Qt event loop.
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    Dialog = QtWidgets.QDialog()
    ui = Ui_Dialog()
    ui.setupUi(Dialog)
    Dialog.show()
    sys.exit(app.exec_())
| [
"TORRES ZETINO juan carlos"
] | TORRES ZETINO juan carlos |
c51f286624640820706df54c61f1e49246320510 | aa6cfedb0578c21742545ce4f59477fbddf8da4f | /QAMAS/_build/jupyter_execute/Abstract.py | 9bd003f3b68839d1cdcf3e2e6f690745e2a11efb | [
"MIT"
] | permissive | beards-lab/QAMAS_book | de109efb5c85e4a8454dcdcc1aabed1ce7836a60 | 8a6b78fbf3564ce313380619900f2de5fcbe9035 | refs/heads/master | 2023-04-08T10:34:13.554378 | 2021-06-09T21:04:46 | 2021-06-09T21:04:46 | 336,389,470 | 1 | 1 | MIT | 2021-06-08T22:15:17 | 2021-02-05T21:07:48 | Jupyter Notebook | UTF-8 | Python | false | false | 2,334 | py | #!/usr/bin/env python
# coding: utf-8
# # Title Page
#
# ## Authors
#
# E. Benjamin Randall$^{1}$, Marcus Hock$^{2}$, Rachel Lopez$^{1}$, Bahador Marzban$^{1}$, Collin Marshall$^{1}$, Daniel A. Beard$^{1*}$
#
# $^{1}$ *Department of Molecular and Integrative Physiology, University of Michigan, Ann Arbor, MI*
#
# $^{2}$ *Department of Bioengineering, University of California at San Diego, San Diego, CA*
#
# *Corresponding author
#
# *Email addresses*: ebrandal@umich.edu (E.B. Randall), m1hock@eng.ucsd.edu (M. Hock), ralopez@umich.edu (R. Lopez), bmarzban@umich.edu (B. Marzban), colmar@umich.edu (C. Marshall), beardda@umich.edu (D.A. Beard).
#
#
# ## Abstract
#
# We present a computational framework for analyzing and simulating mitochondrial ATP synthesis using basic thermodynamic and kinetic principles. The framework invokes detailed descriptions of the thermodynamic driving forces associated with the processes of the electron transport chain, mitochondrial ATP synthetase, and phosphate and adenine nucleotide transporters. Assembling models of these discrete processes into an integrated model of mitochondrial ATP synthesis, we illustrate how to analyze and simulate in vitro respirometry experiments and how models identified from in vitro experimental data effectively explain cardiac respiratory control in vivo. Computer codes for these analyses are embedded as Python scripts in a Jupyter Book to facilitate easy adoption and modification of the concepts developed here. This accessible framework may also prove useful in supporting educational applications. All source codes are available at <a href="https://beards-lab.github.io/QAMAS_book/">https://beards-lab.github.io/QAMAS_book/</a>.
#
#
# ## Highlights
#
# - A kinetic and thermodynamic framework for mitochondrial energetics is developed.
# - The framework is applied to simulate ATP synthesis and respiratory control.
# - We illustrate how respiratory control in vitro translates to energetics in vivo.
# - Computer codes are available at DOI: 10.5281/zenodo.4919564.
#
#
# ## Funding
#
# This work was supported by NIH grant HL144657.
#
# In[ ]:
#
# ```{toctree}
# :hidden:
# :titlesonly:
#
#
# Abbreviations
# Introduction
# Principles
# BuildingModel
# InVitroModel
# InVivoModel
# Summary
# References
# ```
#
| [
"ebenjaminrandall@gmail.com"
] | ebenjaminrandall@gmail.com |
28c2a1397296c0b3d064628bce32890e02e60df9 | c23eacbdc07236ebeb21bccd75a5ed0b0fb5ad35 | /binance/websockets.py | a418fa2189e3554f338d7443ba60554120675258 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | kheaactua/python-binance | 8086dc0facbf5380efcce9f640eaa57c6e0eb3eb | 47b8fd2b571f8f85895b080de6f422aa386e7c99 | refs/heads/master | 2021-09-03T08:22:35.592543 | 2018-01-07T13:58:12 | 2018-01-07T13:58:12 | 116,569,527 | 1 | 0 | null | 2018-01-07T13:54:35 | 2018-01-07T13:54:34 | null | UTF-8 | Python | false | false | 16,339 | py | #!/usr/bin/env python
# coding=utf-8
import json
import threading
from autobahn.twisted.websocket import WebSocketClientFactory, \
WebSocketClientProtocol, \
connectWS
from twisted.internet import reactor, ssl
from twisted.internet.protocol import ReconnectingClientFactory
from twisted.internet.error import ReactorAlreadyRunning
from binance.client import Client
class BinanceClientProtocol(WebSocketClientProtocol):
    """Websocket protocol that decodes JSON text frames and forwards the
    resulting object to the factory's registered callback."""
    def onConnect(self, response):
        # reset the delay after reconnecting
        self.factory.resetDelay()
    def onMessage(self, payload, isBinary):
        # Binance streams send text frames; binary frames are ignored.
        if not isBinary:
            try:
                payload_obj = json.loads(payload.decode('utf8'))
            except ValueError:
                # Malformed JSON frames are silently dropped.
                pass
            else:
                self.factory.callback(payload_obj)
class BinanceReconnectingClientFactory(ReconnectingClientFactory):
    """Reconnect policy: near-immediate first retry, back-off capped at
    10 seconds, and at most 5 attempts."""
    # set initial delay to a short time
    initialDelay = 0.1
    maxDelay = 10
    maxRetries = 5
class BinanceClientFactory(WebSocketClientFactory, BinanceReconnectingClientFactory):
    """Websocket factory that retries whenever the connection fails to be
    established or is lost, per the reconnect policy above."""
    protocol = BinanceClientProtocol
    def clientConnectionFailed(self, connector, reason):
        self.retry(connector)
    def clientConnectionLost(self, connector, reason):
        self.retry(connector)
class BinanceSocketManager(threading.Thread):
    """Manages Binance websocket connections on a shared Twisted reactor.

    Each ``start_*`` method opens one stream and returns a connection key
    that can later be passed to :meth:`stop_socket`.  The manager runs the
    reactor in its own thread: call :meth:`start` once after construction
    and :meth:`close` to shut every socket down.
    """
    STREAM_URL = 'wss://stream.binance.com:9443/'
    # Partial-book depth levels accepted by start_depth_socket's ``depth``.
    WEBSOCKET_DEPTH_5 = '5'
    WEBSOCKET_DEPTH_10 = '10'
    WEBSOCKET_DEPTH_20 = '20'
    # Interval for refreshing the user-stream listen key so the server does
    # not expire it while the stream is idle.
    _user_timeout = 30 * 60  # 30 minutes
    def __init__(self, client):
        """Initialise the BinanceSocketManager
        :param client: Binance API client
        :type client: binance.Client
        """
        threading.Thread.__init__(self)
        self._conns = {}  # connection key -> twisted connector
        self._user_timer = None
        self._user_listen_key = None
        self._user_callback = None
        self._client = client
    def _start_socket(self, path, callback, prefix='ws/'):
        """Open one websocket for *path* and remember its connector.

        Returns the path (used as the connection key) on success, or
        False when a socket for that path is already open.
        """
        if path in self._conns:
            return False
        factory_url = self.STREAM_URL + prefix + path
        factory = BinanceClientFactory(factory_url)
        factory.protocol = BinanceClientProtocol
        factory.callback = callback
        factory.reconnect = True
        context_factory = ssl.ClientContextFactory()
        self._conns[path] = connectWS(factory, context_factory)
        return path
    def start_depth_socket(self, symbol, callback, depth=None):
        """Start a websocket for symbol market depth returning either a diff or a partial book
        https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#partial-book-depth-streams
        :param symbol: required
        :type symbol: str
        :param callback: callback function to handle messages
        :type callback: function
        :param depth: optional Number of depth entries to return, default None. If passed returns a partial book instead of a diff
        :type depth: enum
        :returns: connection key string if successful, False otherwise
        Partial Message Format
        .. code-block:: python
            {
                "lastUpdateId": 160,  # Last update ID
                "bids": [             # Bids to be updated
                    [
                        "0.0024",     # price level to be updated
                        "10",         # quantity
                        []            # ignore
                    ]
                ],
                "asks": [             # Asks to be updated
                    [
                        "0.0026",     # price level to be updated
                        "100",        # quantity
                        []            # ignore
                    ]
                ]
            }
        Diff Message Format
        .. code-block:: python
            {
                "e": "depthUpdate", # Event type
                "E": 123456789,     # Event time
                "s": "BNBBTC",      # Symbol
                "U": 157,           # First update ID in event
                "u": 160,           # Final update ID in event
                "b": [              # Bids to be updated
                    [
                        "0.0024",   # price level to be updated
                        "10",       # quantity
                        []          # ignore
                    ]
                ],
                "a": [              # Asks to be updated
                    [
                        "0.0026",   # price level to be updated
                        "100",      # quantity
                        []          # ignore
                    ]
                ]
            }
        """
        socket_name = symbol.lower() + '@depth'
        # A depth level (other than '1') selects the partial-book stream.
        if depth and depth != '1':
            socket_name = '{}{}'.format(socket_name, depth)
        return self._start_socket(socket_name, callback)
    def start_kline_socket(self, symbol, callback, interval=Client.KLINE_INTERVAL_1MINUTE):
        """Start a websocket for symbol kline data
        https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#klinecandlestick-streams
        :param symbol: required
        :type symbol: str
        :param callback: callback function to handle messages
        :type callback: function
        :param interval: Kline interval, default KLINE_INTERVAL_1MINUTE
        :type interval: enum
        :returns: connection key string if successful, False otherwise
        Message Format
        .. code-block:: python
            {
                "e": "kline",                                   # event type
                "E": 1499404907056,                             # event time
                "s": "ETHBTC",                                  # symbol
                "k": {
                    "t": 1499404860000,                 # start time of this bar
                    "T": 1499404919999,                 # end time of this bar
                    "s": "ETHBTC",                              # symbol
                    "i": "1m",                                  # interval
                    "f": 77462,                                 # first trade id
                    "L": 77465,                                 # last trade id
                    "o": "0.10278577",                  # open
                    "c": "0.10278645",                  # close
                    "h": "0.10278712",                  # high
                    "l": "0.10278518",                  # low
                    "v": "17.47929838",                 # volume
                    "n": 4,                                             # number of trades
                    "x": false,                                 # whether this bar is final
                    "q": "1.79662878",                  # quote volume
                    "V": "2.34879839",                  # volume of active buy
                    "Q": "0.24142166",                  # quote volume of active buy
                    "B": "13279784.01349473"    # can be ignored
                    }
            }
        """
        socket_name = '{}@kline_{}'.format(symbol.lower(), interval)
        return self._start_socket(socket_name, callback)
    def start_trade_socket(self, symbol, callback):
        """Start a websocket for symbol trade data
        https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#trade-streams
        :param symbol: required
        :type symbol: str
        :param callback: callback function to handle messages
        :type callback: function
        :returns: connection key string if successful, False otherwise
        Message Format
        .. code-block:: python
            {
                "e": "trade",     # Event type
                "E": 123456789,   # Event time
                "s": "BNBBTC",    # Symbol
                "t": 12345,       # Trade ID
                "p": "0.001",     # Price
                "q": "100",       # Quantity
                "b": 88,          # Buyer order Id
                "a": 50,          # Seller order Id
                "T": 123456785,   # Trade time
                "m": true,        # Is the buyer the market maker?
                "M": true         # Ignore.
            }
        """
        return self._start_socket(symbol.lower() + '@trade', callback)
    def start_aggtrade_socket(self, symbol, callback):
        """Start a websocket for symbol trade data
        https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#aggregate-trade-streams
        :param symbol: required
        :type symbol: str
        :param callback: callback function to handle messages
        :type callback: function
        :returns: connection key string if successful, False otherwise
        Message Format
        .. code-block:: python
            {
                "e": "aggTrade",                # event type
                "E": 1499405254326,             # event time
                "s": "ETHBTC",                  # symbol
                "a": 70232,                             # aggregated tradeid
                "p": "0.10281118",              # price
                "q": "8.15632997",              # quantity
                "f": 77489,                             # first breakdown trade id
                "l": 77489,                             # last breakdown trade id
                "T": 1499405254324,             # trade time
                "m": false,                             # whether buyer is a maker
                "M": true                               # can be ignored
            }
        """
        return self._start_socket(symbol.lower() + '@aggTrade', callback)
    def start_symbol_ticker_socket(self, symbol, callback):
        """Start a websocket for a symbol's ticker data
        https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#individual-symbol-ticker-streams
        :param symbol: required
        :type symbol: str
        :param callback: callback function to handle messages
        :type callback: function
        :returns: connection key string if successful, False otherwise
        Message Format
        .. code-block:: python
            {
                "e": "24hrTicker",  # Event type
                "E": 123456789,     # Event time
                "s": "BNBBTC",      # Symbol
                "p": "0.0015",      # Price change
                "P": "250.00",      # Price change percent
                "w": "0.0018",      # Weighted average price
                "x": "0.0009",      # Previous day's close price
                "c": "0.0025",      # Current day's close price
                "Q": "10",          # Close trade's quantity
                "b": "0.0024",      # Best bid price
                "B": "10",          # Bid bid quantity
                "a": "0.0026",      # Best ask price
                "A": "100",         # Best ask quantity
                "o": "0.0010",      # Open price
                "h": "0.0025",      # High price
                "l": "0.0010",      # Low price
                "v": "10000",       # Total traded base asset volume
                "q": "18",          # Total traded quote asset volume
                "O": 0,             # Statistics open time
                "C": 86400000,      # Statistics close time
                "F": 0,             # First trade ID
                "L": 18150,         # Last trade Id
                "n": 18151          # Total number of trades
            }
        """
        return self._start_socket(symbol.lower() + '@ticker', callback)
    def start_ticker_socket(self, callback):
        """Start a websocket for all ticker data
        By default all markets are included in an array.
        https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#all-market-tickers-stream
        :param callback: callback function to handle messages
        :type callback: function
        :returns: connection key string if successful, False otherwise
        Message Format
        .. code-block:: python
            [
                {
                    'F': 278610,
                    'o': '0.07393000',
                    's': 'BCCBTC',
                    'C': 1509622420916,
                    'b': '0.07800800',
                    'l': '0.07160300',
                    'h': '0.08199900',
                    'L': 287722,
                    'P': '6.694',
                    'Q': '0.10000000',
                    'q': '1202.67106335',
                    'p': '0.00494900',
                    'O': 1509536020916,
                    'a': '0.07887800',
                    'n': 9113,
                    'B': '1.00000000',
                    'c': '0.07887900',
                    'x': '0.07399600',
                    'w': '0.07639068',
                    'A': '2.41900000',
                    'v': '15743.68900000'
                }
            ]
        """
        return self._start_socket('!ticker@arr', callback)
    def start_multiplex_socket(self, streams, callback):
        """Start a multiplexed socket using a list of socket names.
        User stream sockets can not be included.
        Symbols in socket name must be lowercase i.e bnbbtc@aggTrade, neobtc@ticker
        Combined stream events are wrapped as follows: {"stream":"<streamName>","data":<rawPayload>}
        https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md
        :param streams: list of stream names in lower case
        :type streams: list
        :param callback: callback function to handle messages
        :type callback: function
        :returns: connection key string if successful, False otherwise
        Message Format - see Binance API docs for all types
        """
        stream_path = 'streams={}'.format('/'.join(streams))
        # Multiplexed streams live under 'stream?' instead of 'ws/'.
        return self._start_socket(stream_path, callback, 'stream?')
    def start_user_socket(self, callback):
        """Start a websocket for user data
        https://www.binance.com/restapipub.html#user-wss-endpoint
        :param callback: callback function to handle messages
        :type callback: function
        :returns: connection key string if successful, False otherwise
        Message Format - see Binance API docs for all types
        """
        if self._user_listen_key:
            # cleanup any sockets with this key
            for conn_key in self._conns:
                if len(conn_key) >= 60 and conn_key[:60] == self._user_listen_key:
                    self.stop_socket(conn_key)
                    break
        self._user_listen_key = self._client.stream_get_listen_key()
        self._user_callback = callback
        conn_key = self._start_socket(self._user_listen_key, callback)
        if conn_key:
            # start timer to keep socket alive
            self._start_user_timer()
        return conn_key
    def _start_user_timer(self):
        # Daemon timer so a pending keepalive never blocks interpreter exit.
        self._user_timer = threading.Timer(self._user_timeout, self._keepalive_user_socket)
        self._user_timer.setDaemon(True)
        self._user_timer.start()
    def _keepalive_user_socket(self):
        listen_key = self._client.stream_get_listen_key()
        # If the server issued a new key, reconnect the user socket with it;
        # either way, schedule the next keepalive.
        if listen_key != self._user_listen_key:
            self.start_user_socket(self._user_callback)
        self._start_user_timer()
    def stop_socket(self, conn_key):
        """Stop a websocket given the connection key
        :param conn_key: Socket connection key
        :type conn_key: string
        :returns: connection key string if successful, False otherwise
        """
        if conn_key not in self._conns:
            return
        # disable reconnecting if we are closing
        self._conns[conn_key].factory = WebSocketClientFactory(self.STREAM_URL + 'tmp_path')
        self._conns[conn_key].disconnect()
        del(self._conns[conn_key])
        # check if we have a user stream socket
        # NOTE(review): this assumes listen keys are at least 60 characters
        # long and compares only the first 60 — confirm against the current
        # Binance listen-key format.
        if len(conn_key) >= 60 and conn_key[:60] == self._user_listen_key:
            self._stop_user_socket()
    def _stop_user_socket(self):
        if not self._user_listen_key:
            return
        # stop the timer
        self._user_timer.cancel()
        self._user_timer = None
        # close the stream
        self._client.stream_close(listenKey=self._user_listen_key)
        self._user_listen_key = None
    def run(self):
        # Thread entry point: run the shared Twisted reactor.
        try:
            reactor.run(installSignalHandlers=False)
        except ReactorAlreadyRunning:
            # Ignore error about reactor already running
            pass
    def close(self):
        """Close all connections
        """
        # Iterate over a snapshot since stop_socket mutates self._conns.
        keys = set(self._conns.keys())
        for key in keys:
            self.stop_socket(key)
        self._conns = {}
| [
"hello@sammchardy.net"
] | hello@sammchardy.net |
485eca5437cee4a4a6208ae5326f9da735df8c1c | 0358e068503e476a47b1cc6180f2032b9103e754 | /yushubook/app/spider/yushu_book.py | b0b0c1655ce5c3640305f36b1aa364f4efc3ba04 | [] | no_license | SuperHaHa1437/PycharmProjects | 9610951a7c4dda4e708e52c68631625c0755cd96 | b6e25308147a82d78949d0a0673a4262456df18a | refs/heads/master | 2022-12-09T19:58:12.837997 | 2021-04-24T09:08:01 | 2021-04-24T09:08:01 | 131,120,703 | 0 | 0 | null | 2022-12-08T05:01:58 | 2018-04-26T07:58:15 | HTML | UTF-8 | Python | false | false | 2,031 | py | """
Created by 张 on 2019/8/5
"""
from app.libs.httper import HTTP
from flask import current_app
__author__ = '张'
# Yushu book lookup service
class YuShuBook:
    """Client for the Yushu book API: search by ISBN or by keyword and
    keep the parsed results on the instance (``total`` and ``books``)."""
    isbn_url = 'http://t.yushu.im/v2/book/isbn/{}'
    keyword_url = 'http://t.yushu.im/v2/book/search?q={}&count={}&start={}'
    def __init__(self):
        self.total = 0   # number of matching books
        self.books = []  # raw book dicts returned by the API
    def search_by_isbn(self, isbn):
        """
        Search by ISBN.
        :param isbn: the ISBN to look up
        :return: None; stores the single-book result on self.books/self.total
        """
        url = self.isbn_url.format(isbn)
        result = HTTP.get(url)
        self.__fill_single(result)
    def search_by_keyword(self, keyword, page=1):
        """
        Search by keyword.
        :param keyword: the search keyword
        :param page: page number, defaults to 1
        :return: None; stores the paged results on self.books/self.total
        """
        url = self.keyword_url.format(keyword, current_app.config['PER_PAGE'], self.calculate_start(page))
        result = HTTP.get(url)
        self.__fill_collection(result)
    def __fill_single(self, data):
        """
        Parse the single-book payload returned by an ISBN search.
        :param data: raw API response (falsy when nothing was found)
        """
        if data:
            self.total = 1
            self.books.append(data)
    def __fill_collection(self, data):
        """
        Parse the multi-book payload returned by a keyword search.
        :param data: raw API response with 'total' and 'books' keys
        """
        self.total = data['total']
        self.books = data['books']
    def calculate_start(self, page):
        """
        :param page: result page number; each page holds PER_PAGE results
                     (e.g. 15 per page)
        :return: offset of the first result on that page, e.g. page 1
                 starts at result 0
        """
        return (page - 1) * current_app.config['PER_PAGE']
    @property
    def first(self):
        """
        :return: the first book in the list, or None when there are no
                 results (used by search_by_isbn callers)
        """
        return self.books[0] if self.total >= 1 else None
| [
"565393394@qq.com"
] | 565393394@qq.com |
161a3d838a17fc88e760f1620eb54b1f12874611 | 24eeb28433680606f9d1e099b19ec595552cf06b | /repo/script.module.resolveurl/lib/resolveurl/plugins/streamvid.py | d62d62d229807de23912c2a33ab583000464b083 | [] | no_license | irmu/arda | d8ecdedc17bb01650b538dc9e00f438b6d0eed5a | 9b7cab3656c2497c812ab101a56ed661dd8cf4a7 | refs/heads/main | 2023-09-01T08:48:02.823681 | 2023-08-29T18:11:28 | 2023-08-29T18:11:28 | 151,835,016 | 7 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,658 | py | """
Plugin for ResolveURL
Copyright (C) 2020 gujal
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from resolveurl.lib import helpers
from resolveurl import common
from resolveurl.resolver import ResolveUrl, ResolverError
class StreamVidResolver(ResolveUrl):
    """Resolves streamvid.co / streamvid.cc player pages to direct video URLs."""
    name = 'StreamVid'
    domains = ['streamvid.co', 'streamvid.cc']
    # Matches e.g. https://streamvid.co/player/<media_id>
    pattern = r'(?://|\.)(streamvid\.(?:co|cc))/player/([0-9a-zA-Z]+)'
    def get_media_url(self, host, media_id):
        """Fetch the player page, unpack its packed JavaScript, and return
        the picked stream URL with request headers appended.

        :raises ResolverError: when no playable source can be scraped
        """
        web_url = self.get_url(host, media_id)
        headers = {'User-Agent': common.RAND_UA,
                   'Referer': web_url}
        html = self.net.http_GET(web_url, headers=headers).content
        # The page hides its sources inside packed ("juiced") javascript.
        html = helpers.get_juiced_data(html)
        sources = helpers.scrape_sources(html)
        if sources:
            return helpers.pick_source(sources) + helpers.append_headers(headers)
        raise ResolverError('Video cannot be located.')
    def get_url(self, host, media_id):
        # Normalise any matched host/media id into the canonical player URL.
        return self._default_get_url(host, media_id, template='https://{host}/player/{media_id}/')
| [
"zacharias.sama@gmail.com"
] | zacharias.sama@gmail.com |
d5833f89424f1f61deab1343fc7aebff8a4bfae4 | cffa7525b587d2018c8af277cd48be89882f5bab | /polynomial_regression.py | 07aa39a4c19140b13359452bd4cd7f3983316f96 | [] | no_license | bapinmalakar/datascience-ml | 6a40942ce5da1e07d10cb29f290a7fcdcbd275ad | 2aa5c6400fa03cd85ae25035773ffa2551b5b798 | refs/heads/master | 2020-07-28T07:05:36.057187 | 2020-04-13T18:23:38 | 2020-04-13T18:23:38 | 209,346,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,328 | py | # Polynomial regression is form of linear regression which will modeled the relationship betwwen X and Y by nth Degree polynomial
# Its good fit for non-linear
# All suppose you want for a features your target should always increase, as X is increase then we can use polynomial
#h(Q)= Q0 + Q1*X + Q2(X^2) + Q3(X^3), this ensure taht your Y never decrease with X
import pandas as pd
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, r2_score
dataFrame = pd.read_csv('./query_result.csv', index_col="id")
X = dataFrame[['shares', 'views']]
Y = dataFrame.likes
print(X.head())
cols = [col for col in X.columns if X[col].dtype=='object']
print(cols)
#BUILD POLYNOMIAL FOR MODEL
poly = PolynomialFeatures(degree=4)
X_poly = poly.fit_transform(X)
#Create Linear model
polyModel = LinearRegression()
nonPolyModel = LinearRegression()
#trai or Fit model
polyModel.fit(X_poly, Y)
nonPolyModel.fit(X, Y)
#prediction
polyPredict = polyModel.predict(X_poly)
nonPolyPredict = nonPolyModel.predict(X)
print('\n\n', polyPredict)
print('\n\n', nonPolyPredict)
#MAE error
print('\n\n', mean_absolute_error(Y, polyPredict), r2_score(Y, polyPredict))
print('\n\n', mean_absolute_error(Y, nonPolyPredict), r2_score(Y, nonPolyPredict)) | [
"biplab@terriblytinytales.com"
] | biplab@terriblytinytales.com |
48e505a2307bf3054c1a4fb0f489f1d9dfcdd3c5 | b55d3f576aedd7674836f63a72afd7516565fbb4 | /python_assignment_4.py | 892116f96c55bd3b78c329b5150d88fbb757ae71 | [] | no_license | munishscientist/python-codes | 6d46edda12b6fe4854b309a6e38fe1f509f05c2c | d7d1a3135796a4f856134ca8f77fbbdf2507ac0f | refs/heads/master | 2020-09-22T12:48:24.864770 | 2019-12-01T17:45:24 | 2019-12-01T17:45:24 | 225,202,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | strings=input()
num=input()
num=int(num)
def exploder(strings, num):
    """Print `strings` on its own line `num` times.

    Args:
        strings: The text to repeat.
        num: How many times to print it (nothing is printed when num <= 0).
    """
    for _ in range(num):  # loop index is unused, so name it `_`
        print(strings)
# Print the user-supplied string `num` times.
exploder(strings,num)
# NOTE(review): this prints the function object itself (e.g.
# "<function exploder at 0x...>"), not its output -- it looks like a
# leftover debug line; confirm whether it should be removed.
print(exploder)
| [
"noreply@github.com"
] | munishscientist.noreply@github.com |
9a44ddb8936bbec7bd686b72c343e890fc4aa6a3 | 5e84763c16bd6e6ef06cf7a129bb4bd29dd61ec5 | /blimgui/dist/pygame/examples/testsprite.py | b03f5e714175c2787e3f2b5f3332dbe0a3261bef | [
"MIT",
"LicenseRef-scancode-public-domain"
] | permissive | juso40/bl2sdk_Mods | 8422a37ca9c2c2bbf231a2399cbcb84379b7e848 | 29f79c41cfb49ea5b1dd1bec559795727e868558 | refs/heads/master | 2023-08-15T02:28:38.142874 | 2023-07-22T21:48:01 | 2023-07-22T21:48:01 | 188,486,371 | 42 | 110 | MIT | 2022-11-20T09:47:56 | 2019-05-24T20:55:10 | Python | UTF-8 | Python | false | false | 6,991 | py | #!/usr/bin/env python
""" pg.examples.testsprite
Like the testsprite.c that comes with libsdl, this pygame version shows
lots of sprites moving around.
It is an abomination of ugly code, and mostly used for testing.
See pg.examples.aliens for some prettyier code.
"""
import sys
import os
from random import randint
from time import time
import pygame as pg
# Joke flag kept from the original libsdl testsprite: psyco no longer exists.
if "-psyco" in sys.argv:
    # psyco was a great, but now unsupported jit for pythons before 2.7
    print("No psyco for you! psyco failed to import and run.")
main_dir = os.path.split(os.path.abspath(__file__))[0]
data_dir = os.path.join(main_dir, "data")
# use this to use update rects or not.
# If the screen is mostly full, then update rects are not useful.
update_rects = True
if "-update_rects" in sys.argv:
    update_rects = True
if "-noupdate_rects" in sys.argv:
    update_rects = False
# Mix non-moving sprites in with the moving ones.
use_static = False
if "-static" in sys.argv:
    use_static = True
# LayeredDirty rendering implies dirty-rect updates.
use_layered_dirty = False
if "-layered_dirty" in sys.argv:
    update_rects = True
    use_layered_dirty = True
# Accumulate SDL display-mode flags from the command line.
flags = 0
if "-flip" in sys.argv:
    flags ^= pg.DOUBLEBUF
if "-fullscreen" in sys.argv:
    flags ^= pg.FULLSCREEN
if "-sw" in sys.argv:
    flags ^= pg.SWSURFACE
# RLE-accelerate the colorkey unless a hardware surface is requested.
use_rle = True
if "-hw" in sys.argv:
    flags ^= pg.HWSURFACE
    use_rle = False
if "-scaled" in sys.argv:
    flags ^= pg.SCALED
# Window size, overridable with "-width N" / "-height N".
screen_dims = [640, 480]
if "-height" in sys.argv:
    i = sys.argv.index("-height")
    screen_dims[1] = int(sys.argv[i + 1])
if "-width" in sys.argv:
    i = sys.argv.index("-width")
    screen_dims[0] = int(sys.argv[i + 1])
# Per-pixel alpha blending for the sprite surfaces.
if "-alpha" in sys.argv:
    use_alpha = True
else:
    use_alpha = False
print(screen_dims)
##class Thingy(pg.sprite.Sprite):
## images = None
## def __init__(self):
## pg.sprite.Sprite.__init__(self)
## self.image = Thingy.images[0]
## self.rect = self.image.get_rect()
## self.rect.x = randint(0, screen_dims[0])
## self.rect.y = randint(0, screen_dims[1])
## #self.vel = [randint(-10, 10), randint(-10, 10)]
## self.vel = [randint(-1, 1), randint(-1, 1)]
##
## def move(self):
## for i in [0, 1]:
## nv = self.rect[i] + self.vel[i]
## if nv >= screen_dims[i] or nv < 0:
## self.vel[i] = -self.vel[i]
## nv = self.rect[i] + self.vel[i]
## self.rect[i] = nv
class Thingy(pg.sprite.DirtySprite):
    """A sprite that drifts around the window, bouncing off its edges."""

    # Shared surface list; assigned once in main() before any instance
    # is created.
    images = None

    def __init__(self):
        pg.sprite.DirtySprite.__init__(self)
        self.image = Thingy.images[0]
        self.rect = self.image.get_rect()
        self.rect.x = randint(0, screen_dims[0])
        self.rect.y = randint(0, screen_dims[1])
        self.vel = [randint(-1, 1), randint(-1, 1)]
        # dirty == 2 marks the sprite dirty on every frame, so it is
        # always repainted by LayeredDirty groups.
        self.dirty = 2

    def update(self):
        """Advance one step, reflecting the velocity at window borders."""
        for axis in (0, 1):
            moved = self.rect[axis] + self.vel[axis]
            if not 0 <= moved < screen_dims[axis]:
                self.vel[axis] = -self.vel[axis]
                moved = self.rect[axis] + self.vel[axis]
            self.rect[axis] = moved
class Static(pg.sprite.DirtySprite):
    """A sprite that never moves.

    Placed at a random spot within the upper-left three quarters of the
    screen so it stays fully visible.
    """

    images = None  # shared surface list, assigned once in main()

    def __init__(self):
        pg.sprite.DirtySprite.__init__(self)
        self.image = Static.images[0]
        self.rect = self.image.get_rect()
        # Use integer division: random.randint() no longer accepts float
        # bounds (3 * dim / 4 is a float; TypeError on Python >= 3.12).
        self.rect.x = randint(0, 3 * screen_dims[0] // 4)
        self.rect.y = randint(0, 3 * screen_dims[1] // 4)
def main(
    update_rects=True,
    use_static=False,
    use_layered_dirty=False,
    screen_dims=None,
    use_alpha=False,
    flags=0,
):
    """Show lots of sprites moving around

    Optional keyword arguments:
    update_rects - use the RenderUpdate sprite group class (default True)
    use_static - include non-moving images (default False)
    use_layered_dirty - Use the FastRenderGroup sprite group (default False)
    screen_dims - Pygame window dimensions (default [640, 480])
    use_alpha - use alpha blending (default False)
    flags - additional display mode flags (default no additional flags)
    """
    # Avoid a mutable default argument: build the default size per call.
    if screen_dims is None:
        screen_dims = [640, 480]
    if use_layered_dirty:
        update_rects = True
    pg.init()  # needed to initialise time module for get_ticks()
    pg.display.init()
    screen = pg.display.set_mode(screen_dims, flags, vsync="-vsync" in sys.argv)
    # this is mainly for GP2X, so it can quit.
    pg.joystick.init()
    num_joysticks = pg.joystick.get_count()
    if num_joysticks > 0:
        stick = pg.joystick.Joystick(0)
        stick.init()  # now we will receive events for the joystick
    screen.fill([0, 0, 0])
    pg.display.flip()
    sprite_surface = pg.image.load(os.path.join(data_dir, "asprite.bmp"))
    sprite_surface2 = pg.image.load(os.path.join(data_dir, "static.png"))
    if use_rle:
        sprite_surface.set_colorkey([0xFF, 0xFF, 0xFF], pg.SRCCOLORKEY | pg.RLEACCEL)
        sprite_surface2.set_colorkey([0xFF, 0xFF, 0xFF], pg.SRCCOLORKEY | pg.RLEACCEL)
    else:
        sprite_surface.set_colorkey([0xFF, 0xFF, 0xFF], pg.SRCCOLORKEY)
        sprite_surface2.set_colorkey([0xFF, 0xFF, 0xFF], pg.SRCCOLORKEY)
    if use_alpha:
        sprite_surface = sprite_surface.convert_alpha()
        sprite_surface2 = sprite_surface2.convert_alpha()
    else:
        sprite_surface = sprite_surface.convert()
        sprite_surface2 = sprite_surface2.convert()
    Thingy.images = [sprite_surface]
    if use_static:
        Static.images = [sprite_surface2]
    # The sprite count may be supplied as the last command-line argument.
    if len(sys.argv) > 1:
        try:
            numsprites = int(sys.argv[-1])
        except ValueError:  # last argument was a flag, not a count
            numsprites = 100
    else:
        numsprites = 100
    # Pick the sprite group implementation requested on the command line.
    if use_layered_dirty:
        sprites = pg.sprite.LayeredDirty()
    elif update_rects:
        sprites = pg.sprite.RenderUpdates()
    else:
        sprites = pg.sprite.Group()
    for i in range(numsprites):
        if use_static and i % 2 == 0:
            sprites.add(Static())
        sprites.add(Thingy())
    frames = 0
    start = time()
    background = pg.Surface(screen.get_size())
    background = background.convert()
    background.fill([0, 0, 0])
    going = True
    while going:
        if not update_rects:
            screen.fill([0, 0, 0])
        if update_rects:
            sprites.clear(screen, background)
        sprites.update()
        rects = sprites.draw(screen)
        if update_rects:
            pg.display.update(rects)
        else:
            pg.display.flip()
        for event in pg.event.get():
            # pg.QUIT was listed twice in the original membership test.
            if event.type in (pg.QUIT, pg.KEYDOWN, pg.JOYBUTTONDOWN):
                going = False
        frames += 1
    end = time()
    print(f"FPS: {frames / (end - start):f}")
    pg.quit()
# Script entry point: run with the configuration parsed at module level.
if __name__ == "__main__":
    main(update_rects, use_static, use_layered_dirty, screen_dims, use_alpha, flags)
| [
"justin.sostmann@googlemail.com"
] | justin.sostmann@googlemail.com |
3c20d4ae5ec110e98103c612238712a0a0cfad65 | dd531974de52d1714bb216b56dcc07338a506fa5 | /src/physics/single_chain/ideal/thermodynamics/test.py | f35735590c010568b8d0384f9b58a9e8146189e5 | [
"BSD-3-Clause"
] | permissive | sandialabs/Polymers | a457718a48c26ff7041a4f1905450dad7042a0a9 | 708b76001d60101f7b2c3e33a949b177289c9fde | refs/heads/main | 2023-09-01T21:59:45.762327 | 2023-08-28T14:20:25 | 2023-08-28T14:20:25 | 531,658,209 | 11 | 1 | BSD-3-Clause | 2023-09-14T01:49:06 | 2022-09-01T19:26:15 | Rust | UTF-8 | Python | false | false | 3,540 | py | """Module to test the local module.
"""
import unittest
import numpy as np
from polymers import physics
from ..test import Parameters
parameters = Parameters()
Ideal = physics.single_chain.ideal.thermodynamics.Ideal
class Base(unittest.TestCase):
"""Class for basic tests.
"""
def test_init(self):
"""Function to test instantiation.
"""
for _ in range(parameters.number_of_loops):
_ = Ideal(
parameters.number_of_links_minimum,
parameters.link_length_reference,
parameters.hinge_mass_reference
)
def test_number_of_links(self):
"""Function to test the number of links during instantiation.
"""
for _ in range(parameters.number_of_loops):
number_of_links = \
np.random.randint(
parameters.number_of_links_minimum,
high=parameters.number_of_links_maximum
)
self.assertEqual(
number_of_links,
Ideal(
number_of_links,
parameters.link_length_reference,
parameters.hinge_mass_reference
).number_of_links
)
def test_link_length(self):
"""Function to test the link length during instantiation.
"""
for _ in range(parameters.number_of_loops):
link_length = \
parameters.link_length_reference + \
parameters.link_length_scale*(0.5 - np.random.rand())
self.assertEqual(
link_length,
Ideal(
parameters.number_of_links_minimum,
link_length,
parameters.hinge_mass_reference
).link_length
)
def test_hinge_mass(self):
"""Function to test the hinge mass during instantiation.
"""
for _ in range(parameters.number_of_loops):
hinge_mass = \
parameters.hinge_mass_reference + \
parameters.hinge_mass_scale*(0.5 - np.random.rand())
self.assertEqual(
hinge_mass,
Ideal(
parameters.number_of_links_minimum,
parameters.link_length_reference,
hinge_mass
).hinge_mass
)
def test_all_parameters(self):
"""Function to test all parameters during instantiation.
"""
for _ in range(parameters.number_of_loops):
number_of_links = \
np.random.randint(
parameters.number_of_links_minimum,
high=parameters.number_of_links_maximum
)
link_length = \
parameters.link_length_reference + \
parameters.link_length_scale*(0.5 - np.random.rand())
hinge_mass = \
parameters.hinge_mass_reference + \
parameters.hinge_mass_scale*(0.5 - np.random.rand())
model = Ideal(
number_of_links,
link_length,
hinge_mass
)
self.assertEqual(
number_of_links,
model.number_of_links
)
self.assertEqual(
link_length,
model.link_length
)
self.assertEqual(
hinge_mass,
model.hinge_mass
)
| [
"mrbuche@sandia.gov"
] | mrbuche@sandia.gov |
ef2942f022e5e2aa12b903d2053215c58246870a | 495110d9b2adba4a4df2857e4d2274d79a42f7e2 | /iterator/test_iterable.py | 5c020631c6668737135688f36466c34b6206b0cb | [] | no_license | davyjang1/python-practice | 9d43549e1f423d7274321f64bc91d44dc118b6ad | cb4fa55216091f5182dcfdf520a53e3c379c248b | refs/heads/master | 2021-09-08T06:56:54.293918 | 2018-03-08T04:27:18 | 2018-03-08T04:27:18 | 107,388,666 | 2 | 1 | null | 2017-10-18T09:57:55 | 2017-10-18T09:46:25 | Python | UTF-8 | Python | false | false | 714 | py | """
A class with its own iterator that returns only the even numbers it holds.
"""
class iterable_class:
    """Container whose iteration yields only the even numbers it holds."""

    def __init__(self):
        # Backing storage; callers append values to this list directly.
        self.list1 = []

    def __iter__(self):
        # Delegate iteration to the even-number-filtering iterator.
        return my_range_iterator(self.list1)
class my_range_iterator:
    """Iterator over a list that yields only its even numbers."""

    def __init__(self, list1):
        self.list1 = list1
        self.iterator = iter(self.list1)

    def __iter__(self):
        return self

    def __next__(self):
        # Advance until an even value is found.  next() already raises
        # StopIteration when the underlying list is exhausted, so no
        # exception handling is required here.  The original bare
        # `except:` silently converted *any* error (e.g. TypeError from
        # a non-numeric item, or even KeyboardInterrupt) into
        # end-of-iteration, hiding real bugs.
        i = next(self.iterator)
        while i % 2 != 0:
            i = next(self.iterator)
        print ('iterator get number:' + str(i))
        return i
# Demo: fill the container with 1..3 and iterate it; only the even
# value (2) is printed by the loop below.
temp = iterable_class()
temp.list1.append(1)
temp.list1.append(2)
temp.list1.append(3)
for item in temp:
    print (item)
| [
"dahui.jiang@veritas.com"
] | dahui.jiang@veritas.com |
d99a072e37285ac265232ebd81305a7171dae8c0 | 59fdfc7ad025de6142b29f17d024b06bf490b668 | /astropySingleImage.py | 6f6a8c95f5c91ceca0f1e2e37f835196c49fcf40 | [] | no_license | BenGfoyle/seyfertGalaxyThesis | 59a10bdc72713745a37bd48de94715d23cf32258 | 46767241aef6e9516212a9b0b9c07147337ca072 | refs/heads/master | 2020-08-27T23:07:35.733746 | 2020-03-12T12:09:38 | 2020-03-12T12:09:38 | 217,512,243 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,656 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 23 15:05:05 2019
@author: bguilfoyle
"""
import numpy as np
import astropy.io.fits as pyfits
import glob
import matplotlib
import matplotlib.animation as animation
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from tkinter import *
#==============================================================================
def lingray(x, a=None, b=None):
    """Map array `x` onto a linear 0-255 gray scale.

    a and b are the lower/upper cutoffs; when omitted they default to
    the minimum and maximum of `x`.
    """
    # `is None` (identity) is the correct test; `== None` triggers
    # element-wise comparison when an array-like is passed for a/b.
    if a is None:
        a = np.min(x)
    if b is None:
        b = np.max(x)
    return 255.0 * (x - float(a)) / (b - a)
#==============================================================================
#==============================================================================
def loggray(x, a=None, b=None):
    """Map array `x` onto a logarithmic 0-255 gray scale.

    a and b are the lower/upper cutoffs; when omitted they default to
    the minimum and maximum of `x`.
    """
    # `is None` (identity) is the correct test; `== None` triggers
    # element-wise comparison when an array-like is passed for a/b.
    if a is None:
        a = np.min(x)
    if b is None:
        b = np.max(x)
    # Rescale into [10, 1000] first so log10 spans [1, 3], then shift
    # and scale that back onto [0, 255].
    linval = 10.0 + 990.0 * (x - float(a)) / (b - a)
    return (np.log10(linval) - 1.0) * 0.5 * 255.0
#==============================================================================
#==============================================================================
def correctedImage(raw,dark,flat,bias):
    """Apply basic CCD reduction: subtract bias and dark, divide by flat."""
    numerator = raw - bias - dark
    return numerator / flat
#==============================================================================
def addPlot(image,colour,newAlpha):
    """Render `image` on a logarithmic gray scale with the given colormap
    and alpha, then attach a colorbar to the current axes."""
    scaled = loggray(image)
    plt.imshow(scaled, cmap=colour, alpha=newAlpha)
    plt.colorbar()
#==============================================================================
# Glob patterns for the raw science frames (per filter) and the
# calibration frames.
path = "C:/Users/bguilfoyle/Documents/CompPhysics/FYP/seyfertGalaxyThesis/data/seyfertImages/"
rawRPath = path + "raw/*R.fit"
rawHPath = path + "raw/*H.fit"
rawSIIPath = path + "raw/*SII.fit"
rawVPath = path + "raw/*V.fit"
rawPath = path + "raw/*.fit"
biasPath = path + "bias/*.fit"
darkPath = path + "dark/*bin2.fit"
flatPath = path + "flat/Flat_bin2*.fit"
raw = glob.glob(rawRPath)
bias = glob.glob(biasPath)
dark = glob.glob(darkPath)
flat = glob.glob(flatPath)
# NOTE(review): avgImage() is not defined anywhere in this file --
# running the script as-is raises NameError.  Presumably it averages a
# list of FITS frames; confirm where it should be imported from.
avgRaw = avgImage(raw)
avgBias = avgImage(bias)
avgDark = avgImage(dark)
avgFlat = avgImage(flat)
# Reduce and display the combined frame on a logarithmic gray scale.
finalCombined = correctedImage(avgRaw,avgDark,avgFlat,avgBias)
plt.imshow(loggray(finalCombined), cmap = "Reds")
plt.colorbar()
"noreply@github.com"
] | BenGfoyle.noreply@github.com |
ff0fea894a0c2b96f75aaf54a45a3c578c4e301d | f7c5b6427e46c08b824e2768a2bc734fac89cbc1 | /app/user.py | 4e2ca04a43053d053ce7c7a3ab8f6847a2f3e931 | [] | no_license | bjpinniger/virtualwaitingroom | c3f8d326ce72f5efb2af13dc1d3f8e074b67f801 | f29c5376a1a24143156a83502bda0fc726571319 | refs/heads/master | 2022-12-11T22:42:58.435120 | 2020-02-09T03:57:53 | 2020-02-09T03:57:53 | 233,771,198 | 0 | 1 | null | 2021-11-29T18:00:22 | 2020-01-14T06:21:49 | Python | UTF-8 | Python | false | false | 819 | py | import ldap
from config import Config
# Hostname of the LDAP server, taken from the application config.
LDAP_HOST = Config.LDAP_HOST
def get_ldap_connection():
    """Return a new, unbound LDAP connection to the configured host."""
    return ldap.initialize('ldap://%s:389' % LDAP_HOST)
class User:
    """Minimal Flask-Login style user object backed by an LDAP account."""

    def __init__(self, username):
        self.username = username

    def is_authenticated(self):
        """Every constructed user counts as authenticated."""
        return True

    def is_active(self):
        """Accounts are never disabled on this side."""
        return True

    def is_anonymous(self):
        """Real users are never anonymous."""
        return False

    def get_id(self):
        """The username doubles as the session identifier."""
        return self.username

    @staticmethod
    def validate_login(username, password):
        """Check credentials with a simple LDAP bind; True on success."""
        connection = get_ldap_connection()
        try:
            connection.simple_bind_s(username, password)
            connection.unbind_s()
        except ldap.LDAPError as err:
            print ("authentication error")
            print (err)
            return False
        return True
"bpinniger@gmail.com"
] | bpinniger@gmail.com |
53847d9cb45ca885ddacbb3facca9d20d007b0c3 | d3e5dd3818cbd4813aff24e390d88583e312c116 | /sampleTable.py | b8052bc5a20d701e6dde4dc99515da1652899caf | [] | no_license | silviodonato/PisaHmm | 199f586496387666485e4effc7b8080890553de9 | 10b83de9034b6a5ca558f1fb3303e402e06f7925 | refs/heads/master | 2021-06-22T18:51:48.007861 | 2021-01-14T10:37:39 | 2021-01-14T10:37:39 | 210,295,606 | 0 | 0 | null | 2021-03-29T13:34:35 | 2019-09-23T07:50:18 | Python | UTF-8 | Python | false | false | 2,079 | py | import samples2016
import samples2017
import samples2018
import models2016H
import models2016Z
import models2017H
import models2017Z
import models2018H
import models2018Z
import pprint
def removeYear(string):
    """Strip every occurrence of the year tokens 2016/2017/2018 from `string`."""
    result = string
    for year in ("2016", "2017", "2018"):
        result = result.replace(year, "")
    return result
# Collect, per group, the year-stripped sample names that appear in any
# of the 2016/2017/2018 H and Z model definitions.
groups = {}
for sampleGroups in [
    models2018Z.background,models2017Z.background,models2016Z.background,
    models2018Z.signal,models2017Z.signal,models2016Z.signal,
    models2018H.background,models2017H.background,models2016H.background,
    models2018H.signal,models2017H.signal,models2016H.signal]:
    for g in sampleGroups:
        if not g in groups:
            groups [g] = []
        for s in sampleGroups[g]:
            s = removeYear(s)
            if not s in groups[g]:
                groups[g].append(s)
pprint.pprint(groups)
# Build a tab-separated table: one row per (group, sample), one column
# per year/analysis label, with an "X" where that sample is used.
labels = ['2016Z','2017Z','2018Z','2016H','2017H','2018H']
table = ''
table += 'Group\tSample\t'
for label in labels: table += label+'\t'
table += ' \n'
# Fixed presentation order for the groups in the output table.
groupsOrder = ["EWKZ",'VBF H',"gg H","ZH","WH","ttH","Top","DY0J","DY1J","DY2J","DY","DYVBF","Other"]
allGroups = list(groups.keys())
for group in groupsOrder:
    if group in groups:
        for sample in groups[group]:
            table += group+'\t'+sample+'\t'
            for label in labels:
                # Look up the module for this year/analysis (e.g. models2016Z)
                # and re-insert the year into the sample name.
                models = globals()["models"+label]
                sampleWithYear = sample.replace("_","_"+label[:4])
                print(sample,sampleWithYear, models.background , models.signal)
                if (group in models.background and sampleWithYear in models.background[group]) or (group in models.signal and sampleWithYear in models.signal[group]):
                    # table += sampleWithYear+'\t'
                    table += ' X \t'
                else:
                    table += ' \t'
            table += ' \n'
        allGroups.remove(group)
# Only write the table if every discovered group was covered by
# groupsOrder; otherwise fail loudly with the leftover group names.
if len(allGroups)==0:
    print(table)
    fil = open('sampleTable.txt','w')
    fil.write(table)
    fil.close()
else:
    raise Exception(allGroups)
| [
"silvio.donato@cern.ch"
] | silvio.donato@cern.ch |
3e512b475846aa3965df33e6ee3808761d841cee | 164840bb98d45b40258a95788a8af5c50441e0a1 | /clientes/migrations/0001_initial.py | 5fc51ac3b34012d3256e4eca57604cdf5bc8912e | [] | no_license | pablofm/cuiquer | cf72d97dac09e0877985f0ea38b30fb7e2b9760b | becac80611ad99c1178ab70e97dd3f21d70ec474 | refs/heads/master | 2021-01-24T18:46:43.381380 | 2016-11-09T15:04:55 | 2016-11-09T15:04:55 | 84,471,540 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,012 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-04 11:17
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration; avoid editing by hand.  Creates
    # the Cliente model, linked to profesionales.Servicio and to the
    # configured auth user model.
    initial = True
    dependencies = [
        ('profesionales', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Cliente',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('fecha_solicitud', models.DateField(auto_now_add=True)),
                ('servicio', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='clientes', to='profesionales.Servicio')),
                ('usuario', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"pafloma@gmail.com"
] | pafloma@gmail.com |
15b17ffdad629bb12da488da14d9a94f977d55a4 | 2506f37e2611137216631beea0953a58119ce412 | /steps.py | 182c6b239fe4af7335c20af788c8463046ff0eae | [] | no_license | filipibardusco/EduK-Homecourse_Test | 244262ed0598c32bdc31e7edc52aac3e2fb8de60 | 3a8122b7cf7302e07e290984a03996d626def7b5 | refs/heads/master | 2021-01-02T22:34:34.750813 | 2017-08-21T13:53:52 | 2017-08-21T13:53:52 | 99,344,571 | 0 | 0 | null | 2017-08-04T14:08:18 | 2017-08-04T13:17:40 | Python | UTF-8 | Python | false | false | 5,424 | py | from random import randint
import time
from selenium.common.exceptions import TimeoutException, NoSuchElementException
# --- Behave step definitions: login flow --------------------------------
@given(u'a user enters eduK')
def impl(context):
    context.browser.visit('/')
@when(u'we log in')
def impl(context):
    # NOTE(review): live credentials are hard-coded in the repository --
    # consider moving these to environment variables or test config.
    username_field = context.browser.find_by_id('session_email')
    password_field = context.browser.find_by_id('session_password')
    username_field.send_keys('bardusco@gmail.com')
    password_field.send_keys('123456')
@then(u'we click submit')
def impl(context):
    submit_button = context.browser.find_by_xpath('//button[text()="Entrar"]')
    submit_button.click()
# --- Course browsing steps ----------------------------------------------
@when(u'we click on the "gastronomia page"')
def step_impl(context):
    gastronomia_button = context.browser.find_by_xpath('//*[contains(@class, "header_cat-gastronomia")]')
    gastronomia_button.click()
@then(u'we should see "courses"')
def step_impl(context):
    # The lookup raises if no course card exists, which fails the step.
    course_check = context.browser.find_by_class('course_card_card')
@when(u'we click on the "random course"')
def step_impl(context):
    # Pick a random course-card index between 1 and the number of cards.
    random_number = randint(1, len(context.browser.find_elements_by_xpath('//*[contains(@class, "course_card_card")]')))
    third_course = context.browser.find_by_xpath('//ul[li[div[@class="course_card_card"]]]/li[{}]'.format(random_number))
    third_course.click()
@then(u'we should see "lessons"')
def step_impl(context):
    # Regular courses list lessons; series pages list episodes instead.
    try:
        context.browser.find_by_xpath('//*[contains(@class, "course_activities_lesson")]')
        lesson_check = context.browser.find_by_xpath('//*[contains(@class, "course_activities_lesson")]')
    except NoSuchElementException:
        serie_part_check = context.browser.find_by_xpath('//*[contains(@class, "serie_episodes_episode")]')
@when(u'we click on the "random lesson"')
def step_impl(context):
    sections = context.browser.find_elements_by_xpath('//ul[contains(@class, "course_activities_activities")]')
    # Choose a random lesson within the first activities section.
    random_lesson = randint(1, (len(context.browser.find_elements_by_xpath('//section[div[ul[contains(@class, "course_activities_activities")]]]/div[1]/ul/li'))))
    element = '//section[div[ul[contains(@class, "course_activities_activities")]]]/div/ul[1]/li[{}]'.format(random_lesson)
    context.browser.wait_click(element)
@then(u'we should see "a video"')
def step_impl(context):
    context.browser.find_by_xpath('//iframe[contains(@class, "player_frame")]')
# --- Search and favourite steps -----------------------------------------
@given(u'we make a search')
def step_impl(context):
    search_box = context.browser.find_by_class('header_search-input')
    search_box.clear()
    search_box.send_keys('vimeo')
    submit_search = context.browser.find_by_xpath('//button[contains(@class, "header_search-btn")]')
    submit_search.click()
@when(u'we favourite a course')
def step_impl(context):
    context.browser.wait_click('//button[contains(@class, "save_button_off")]')
@when(u'we undo the favourite course')
def step_impl(context):
    context.browser.wait_click('//button[contains(@class, "save_button_on")]')
@then(u'we go to meus cursos')
def step_impl(context):
    meus_cursos = context.browser.find_by_xpath('//*[text()="Meus Cursos"]')
    meus_cursos.click()
@given(u'we have not completed the course')
def step_impl(context):
    # If the exam icon is still blocked, the course is not yet completed.
    try:
        context.browser.find_by_xpath('//*[contains(@class, "course_activities_exam-icon-blocked")]')
        print("banana")
    except NoSuchElementException:
        # context.execute_steps(u"""
        #     given we make a search
        #     when we click on the "random course"
        #     given we have not completed the course
        # """)
        print('the definition of insanity is doing the same thing over and over again expecting different results')
@when(u'we click on the first lesson')
def step_impl(context):
    context.browser.wait_click('//*[contains(@class, "course_activities_lesson")]')
@then(u'we skip to the end of the video')
def step_impl(context):
    """For each lesson, either jump the video to its end (when a cuepoint
    seek bar is present) or click through the lesson's quiz questions."""
    num_lessons = len(context.browser.find_elements_by_xpath('//li[contains(@class, "course_activities_lesson")]'))
    for i in range(num_lessons):
        time.sleep(5)  # give the lesson page time to load
        if context.browser.check_exists_by_xpath('//*[contains(@class, "cuepoints")]'):
            # Fixed: this line read `ontext.browser...` (a typo that
            # raised NameError whenever the video branch was taken).
            context.browser.switch_to()
            bar = context.browser.find_by_xpath('//*[contains(@class, "cuepoints")]')
            context.browser.wait_click('//*[contains(@class, "cuepoints")]')
            # Click at the far right of the seek bar to jump to the end.
            context.browser.click_point(bar.size['width']-1, bar.size['height']/2, '//*[contains(@class, "cuepoints")]')
            time.sleep(20)
        else:
            context.browser.wait_click('//a[contains(@class, "question_cover_cta")]')
            # Answer random alternatives until the quiz container is gone.
            while context.browser.check_exists_by_xpath('//*[contains(@class, "question_container")]'):
                context.browser.wait_click('//ul[contains(@class, "question_alternative-list")]/li[{}]'.format(randint(1,4)))
                context.browser.wait_click('//div[contains(@class, "question_container")]/footer[contains(@class, "question_bottom-bar")]')
                time.sleep(4)
                if context.browser.check_exists_by_xpath('//div[contains(@class, "question_result")]/footer'):
                    context.browser.wait_click('//div[contains(@class, "question_result")]/footer[contains(@class, "question_bottom-bar")]')
                else:
                    print("Wow, you're lucky")
# Placeholder: nothing extra to do when the lesson has no video -- the
# quiz handling in the previous step already covers that case.
@then(u'go through the assignment if there is no video')
def step_impl(context):
    assert True
| [
"noreply@github.com"
] | filipibardusco.noreply@github.com |
42fd82bd04b3935f33d124cb4a900608fbe5d506 | 7357edd3aaab7c6ffe0f2e301ad8f4fdee21eca3 | /news/models.py | f6d0c303b401d84b77b14934371b8568f9978a84 | [] | no_license | Janteby1/hacker_news_clone | 5e7586fd89daa21e909f50fcc23de1f747dd1b13 | 9aed74211770db04607ca37b88ed03dc7fd492f2 | refs/heads/master | 2021-01-10T16:33:51.596506 | 2016-03-11T03:40:09 | 2016-03-11T03:40:09 | 53,432,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,861 | py | from django.db import models
from django.contrib.auth.models import User
from django.utils.text import slugify
from django.utils import timezone #make sure to set the timezone
# Create your models here.
class UserProfile(models.Model):
    # Links this profile one-to-one with Django's built-in User model.
    user = models.OneToOneField(User)
    # Additional profile attributes can be added here.
    '''
    Included in the django user model are these attributes:
    Username, Password, Email address, firstname, surname
    '''
class Post(models.Model):
    """A submitted link/text post with votes, a slug and timestamps."""
    title = models.CharField(max_length=40)
    link = models.URLField(max_length=120, null = True, default = None)
    content = models.CharField(max_length=4000)
    slug = models.SlugField(max_length=40)
    created_at = models.DateTimeField(editable=False)
    updated_at = models.DateTimeField()
    show = models.BooleanField(default=True)
    votes = models.IntegerField(default=0)
    user = models.ForeignKey(User, default = 1) # adds a FK
    def save(self, *args, **kwargs):
        """Derive the slug from the title and maintain the timestamps
        before delegating to the normal model save."""
        self.slug = slugify(self.title)
        self.updated_at = timezone.now()
        if not self.id:  # only stamp created_at on the first save
            self.created_at = timezone.now()
        super(Post, self).save(*args, **kwargs)
    def to_json(self):
        """Return a dict representation of this post for AJAX responses."""
        return {
            "title": self.title,
            "link": self.link,
            "content": self.content,
            "slug": self.slug,
            "created_at": self.created_at,
            "show": self.show,
            "votes": self.votes,
            # Use the primary key rather than the model instance so the
            # dict is JSON-serialisable, matching Comment.to_json().
            "user": self.user.id,
        }
class Comment(models.Model):
    # NOTE(review): `default = None` on a CharField without null=True will
    # fail at the database level when no title is supplied -- confirm intent.
    title = models.CharField(max_length=40, default = None)
    link = models.URLField(max_length=120, null = True, default = None)
    content = models.CharField(max_length=4000)
    slug = models.SlugField(max_length=40)
    created_at = models.DateTimeField(editable=False)
    show = models.BooleanField(default=True)
    votes = models.IntegerField(default=0)
    user = models.ForeignKey(User) # FK to the commenting user
    post = models.ForeignKey(Post) # FK to the post this comment belongs to
    def save(self, *args, **kwargs):
        # Derive the slug from the title and stamp created_at on first save.
        self.slug = slugify(self.title)
        if not self.id:
            self.created_at = timezone.now()
        super(Comment, self).save(*args, **kwargs)
    def to_json(self):
        # Dict representation (primary keys for relations) for AJAX use.
        return {
            "title": self.title,
            "link": self.link,
            "content": self.content,
            "slug": self.slug,
            "created_at": self.created_at,
            "show": self.show,
            "votes": self.votes,
            "user": self.user.id,
            "post": self.post.id,
        }
| [
"Janteby1@gmail.com"
] | Janteby1@gmail.com |
1e4b280808652a230bc92afa88050e3d9c6ff12e | 20230caa501da645e0c2665c991c41139f2d5fbe | /Fluke 8508/Fluke_8508_AutoCal_Procedure.py | b935832819dcee83e11d78882eaa0507157314c9 | [] | no_license | CBKirberger/AutoCal | 172ad1dd735b6b91dd52bd5c0b2150126f64b9f1 | e8a0372d723a47f5ec652fbd9f563443747c0d78 | refs/heads/master | 2022-12-11T11:23:13.019635 | 2020-09-14T14:25:40 | 2020-09-14T14:25:40 | 295,437,914 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,927 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 9 10:07:00 2020
This procedure is for the calibration of Fluke 8508 DMMs
Required Equipment
- Fluke 5730A
- Fluke 5725A
- Measurement International 10 GOhm Resistor
- Measurement International 1 GOhm Resistor
"""
from F8508 import F8508
from F5730A import F5730A
import master
import pandas as pd
########################################
# In the future use a WO number to autogenerate the test points. Cross reference
# the datasheet from the new WO with another parsed datasheet to determine if
# they are using a different revision/datasheet. If they are the user will have
# to manually make sure the test points have been correctly parsed.
TestPoints = pd.read_excel('TestPoints_Parsed_F8508.xlsx')
GroupNames = TestPoints.Group.unique()
# Warn when any test point row is missing a group assignment.
if pd.isnull(GroupNames).any():
    print('WARNING: At least one test point has not been assigned a testing group.\n')
############################################
# Open Instruments
'''
Fluke8508 = F8508()
Fluke5730 = F5730A()
'''
###############################################
# Below here the actual calibration takes place
###############################################
print('Please choose a test point group from the following options by typing in the group name or corresponding number.')
# Number only the valid (non-null) group names, starting from 1.
valid_groups = [name for name in GroupNames if not pd.isnull(name)]
for group_number, group_name in enumerate(valid_groups, start=1):
    print(str(group_number) + '. ' + str(group_name))
Group = input('Selection: ')
# (The original line ended with a stray "\" line continuation in front of
# the TODO comment, which is fragile; it has been removed.)
print('You have selected the group ' + str(Group) + '. Please ensure that your equipment is connected as shown in Figure 1.\n')
# TODO: Add a display window which pops up with images of the lead connections, setup, all that jazz
Gotime = input('When you are ready to start taking data type "Start" and press enter: ')
while Gotime != 'Start':
    print('Invalid Input')
    Gotime = input('When you are ready to start taking data type "Start" and press enter: ')
| [
"clayton.kirberger@callabco.com"
] | clayton.kirberger@callabco.com |
1de92801c89bd649958d0acd38d16022bd2e57fa | f2337b0a0f29b87a255a607c2e5f7e890e74dbee | /Fib2.py | 1ab626efe03123d808f98be05bc352dbc72e696f | [] | no_license | harryc12a/PassFort | ec9e245a3f6762198c659927312d0ea5277966c3 | e141cc4a1a6de5a555da14616bfa51f67ceb247a | refs/heads/master | 2020-09-06T21:02:44.686808 | 2019-11-08T21:31:27 | 2019-11-08T21:31:27 | 220,550,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 572 | py | # Code to find the first Fibonacci Number with 1001 digits
def first_fib_with_digits(digits):
    """Return the first Fibonacci number with at least `digits` decimal digits."""
    # The smallest number with `digits` digits is 10**(digits - 1), so
    # iterate until the running Fibonacci value reaches that threshold.
    threshold = 10 ** (digits - 1)
    previous, current = 1, 1
    while current < threshold:
        previous, current = current, previous + current
    return current


# The first Fibonacci number with more than 1000 digits must have exactly
# 1001 digits, i.e. be >= 10**1000.
print('The first Fibonacci Number with 1001 digits is: \n%d' % first_fib_with_digits(1001))
| [
"noreply@github.com"
] | harryc12a.noreply@github.com |
b3dee51a6deb3dc61d76f2bcd67a643ecb144380 | 67d8173a716da10a7350213d98938aae9f2115ce | /LeetCode/LC_PY_ANSWERS/maximum-subarray.py | 0d629390e867ecf87df64b8a86941f76a49c225a | [
"MIT"
] | permissive | jxie0755/Learning_Python | 94490d41bdf93acf8396f843328e38b6da310b0f | 143422321cbc3715ca08f6c3af8f960a55887ced | refs/heads/master | 2021-11-02T22:47:35.790239 | 2021-09-26T04:26:23 | 2021-09-26T04:26:23 | 101,445,132 | 0 | 2 | null | 2019-02-19T15:48:44 | 2017-08-25T22:00:16 | Python | UTF-8 | Python | false | false | 399 | py | # Time: O(n)
# Space: O(1)
class Solution(object):
    def maxSubArray(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        # Hoist max(nums) so the all-negative check scans the list once
        # instead of twice.  (Still raises ValueError on empty input,
        # matching the original max([]) behaviour.)
        highest = max(nums)
        if highest < 0:
            # Every element is negative: the best subarray is the single
            # largest element.
            return highest
        # Kadane's algorithm.  Because at least one element is >= 0, it
        # is safe to reset the running sum to 0 whenever it goes negative.
        global_max, local_max = 0, 0
        for x in nums:
            local_max = max(0, local_max + x)
            global_max = max(global_max, local_max)
        return global_max
| [
"30805062+jxie0755@users.noreply.github.com"
] | 30805062+jxie0755@users.noreply.github.com |
8aa2ab53f65dc4ef0b695374f57888bcda195e12 | c6c9d1259c9791d9f8e4ae21733f1a133c56a1b7 | /deiv.py | 6439031a26c2524215432d05751b0e8be997a9fa | [] | no_license | Rajadeivani/deiv | 9c101945774a863a0ad855892fd3a764db9cdca2 | a04c7c94d1389a0df16d6dc7106fc98f69d5a8ff | refs/heads/master | 2020-07-04T01:44:19.903343 | 2019-08-13T09:43:06 | 2019-08-13T09:43:06 | 202,112,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 205 | py | start = 1
def print_primes(start=1, end=10):
    """Print each prime in the inclusive range [start, end], one per line."""
    for candidate in range(start, end + 1):
        if candidate < 2:
            continue  # 0, 1 and negatives are never prime
        # Trial division: the for/else prints only when no divisor
        # divides `candidate` evenly.
        for divisor in range(2, candidate):
            if candidate % divisor == 0:
                break
        else:
            print(candidate)


# Preserve the original script behaviour: print the primes from 1 to 10.
print_primes(1, 10)
| [
"noreply@github.com"
] | Rajadeivani.noreply@github.com |
d4e4d3c783a559a4b0fbd29eb6a6af9d0e16777c | 6600891222661d0e869f34759b13aab32ad17be1 | /code/16-09-c13-vol2.py | 7b0dda007d2a77068c3c63779638f51df47e6038 | [] | no_license | witekwitkowska/LSTM | c2e6f26b862568c5330104812cfe217c9589fa79 | 16174d86b968df541720e9e223c11cefebf85773 | refs/heads/master | 2020-08-01T23:22:00.355073 | 2019-09-26T19:08:09 | 2019-09-26T19:08:09 | 206,821,476 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,140 | py | import tensorflow as tf
import datetime, os
from keras.preprocessing import sequence
from keras import optimizers
from keras.models import Sequential
from keras.layers import Dense, Embedding, Dropout
from keras.layers import LSTM
from keras.callbacks import EarlyStopping, TensorBoard, ModelCheckpoint
import numpy as np
from os.path import exists, join, isfile
from os import listdir
from numpy import *
from keras.utils import to_categorical
import itertools as it
#cut out only dips from samples
from keras.preprocessing.sequence import pad_sequences
import matplotlib.pyplot as plt
import numpy as np
from os.path import exists, join, isfile
from os import listdir
import pandas as pd
import time
name = '-16-09-c13-vol2'  # suffix appended to every artifact written by this run
lr = 0.0001  # Adam learning rate
#Load from raw data
#Load dominant class
dataFolders = sorted(listdir('/media/usuario/datos/raw-voltage-dips/'))
numClass = 13  # index of the class of interest within the sorted folder list
classa_name = dataFolders[numClass]
classDips = 1000  # recordings to load for the class of interest
nonClassDips = 70  # recordings to load from each of the other classes
testPercent = 0.8  # NOTE(review): despite the name this is the TRAIN fraction
x_train = []
y_train = []
x_test = []
y_test = []
dataFolders = ['/media/usuario/datos/raw-voltage-dips/' + f for f in dataFolders if exists(join('/media/usuario/datos/raw-voltage-dips/',f))]
#load class of interest: label 0; first 80% of files go to train, rest to test
classa = dataFolders[numClass]
dipsList = [classa + '/' + f for f in listdir(classa) if isfile(join(classa,f))]
dipsCounter = 0
for dip in dipsList:
    with open(dip, 'r') as d:
        # Columns 1-3 are the three voltage channels (column 0 is skipped).
        if dipsCounter < int(classDips*testPercent):
            x_train.append(loadtxt(dip, usecols = (1,2,3)))
#            x_train.append(loadtxt(dip))
            y_train.append(0)
        else:
            x_test.append(loadtxt(dip, usecols = (1,2,3)))
#            x_test.append(loadtxt(dip))
            y_test.append(0)
    dipsCounter = dipsCounter + 1
    if dipsCounter >= classDips:
        break
#Load rest of the data (label 1): nonClassDips recordings from every other class
for clas in dataFolders:
    # NOTE(review): a single hard-coded folder is skipped here; verify it is
    # meant to match the dominant class selected by numClass above.
    if clas=='/media/usuario/datos/raw-voltage-dips/0-1k_falla_1f':
        continue
    dipsCounter = 0
    dipsList = [clas + '/' + f for f in listdir(clas) if isfile(join(clas,f))]
    for dip in dipsList:
        with open(dip, 'r') as d:
            if dipsCounter < int(nonClassDips*testPercent):
                x_train.append(loadtxt(dip, usecols = (1,2,3)))
#                x_train.append(loadtxt(dip))
                y_train.append(1)
            else:
                x_test.append(loadtxt(dip, usecols = (1,2,3)))
#                x_test.append(loadtxt(dip))
                y_test.append(1)
        dipsCounter = dipsCounter + 1
        if dipsCounter >= nonClassDips:
            break
# Keep only time steps 25..299 of every recording (all 3 channels).
x_train= np.array(x_train)
x_train = x_train[:,25:300,:]
x_test = np.array(x_test)
x_test = x_test[:,25:300,:]
#data scaling
from pandas import Series
from sklearn.preprocessing import MinMaxScaler
x_train_norm = []
x_test_norm = []
# #scaling 0-1
# for f in x_train:
#     scaler = MinMaxScaler()
#     scaler.fit(f)
#     x_train_norm.append(scaler.transform(f))
# for f in x_test:
#     scaler = MinMaxScaler()
#     scaler.fit(f)
#     x_test_norm.append(scaler.transform(f))
# Per-recording min-max scaling shifted to [-0.5, 0.5] (each recording gets
# its own scaler, fitted on that recording alone).
for f in x_train:
    scaler = MinMaxScaler()
    scaler.fit(f)
    x_train_norm.append(scaler.transform(f)-0.5)
for f in x_test:
    scaler = MinMaxScaler()
    scaler.fit(f)
    x_test_norm.append(scaler.transform(f)-0.5)
x_train_norm = np.array(x_train_norm)
x_test_norm = np.array(x_test_norm)
#convert to categorical (one-hot vector)
y2_train = to_categorical(y_train, num_classes=2, dtype='float32')
y2_test = to_categorical(y_test, num_classes=2, dtype='float32')
#MODEL: two stacked LSTM layers + dropout + 2-way softmax
batch_size = 8
epochs = 600
model = Sequential()
model.add(LSTM(50,return_sequences=True, input_shape=x_train[1].shape,kernel_initializer='glorot_uniform'))
model.add(LSTM(50))
model.add(Dropout(0.2))
model.add(Dense(2, activation='softmax'))
adas = optimizers.Adam(lr=lr)
model.compile(loss='categorical_crossentropy', optimizer=adas, metrics=['accuracy'])
start = time.time()
history = model.fit(x_train_norm,y2_train, epochs = epochs, batch_size = batch_size, verbose = 2, validation_data = (x_test_norm, y2_test))
end = time.time()
import pickle
# Persist the Keras training history and the trained model, tagged with `name`.
with open('/media/usuario/datos/results/history' + name, 'wb') as file_pi:
    pickle.dump(history.history, file_pi)
# saving whole model
model.save('/media/usuario/datos/results/models/lstm_model'+ name +'.h5')
# %matplotlib inline
import pylab as plt
# Plot training & validation accuracy values
fig = plt.figure(figsize=(20, 10))
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('Model accuracy:' + classa_name)
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.savefig('/media/usuario/datos/results/charts/' + name + '-acc.png')
plt.close()
# plt.show()
# Plot training & validation loss values
fig = plt.figure(figsize=(20, 10))
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss' + classa_name)
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.savefig('/media/usuario/datos/results/charts/' + name + '-loss.png')
plt.close()
# Final status line (Polish: "finished successfully in <hours>").
print('model'+ name + ' zakonczony powodzeniem w czasie: ', (end-start)/3600)
print(classa_name)
| [
"witek.witkowska@gmail.com"
] | witek.witkowska@gmail.com |
89d1bfc634202ee60f5cbc882161a20668ba5478 | e4fe9a2d3bd9e2a35cdfb058052b3b6e1911aea2 | /classification_based.py | 62df8ed4736f1744394f828b754092d6f1950dfd | [] | no_license | lvwuyunlifan/zero-shot-classification | 6b0bd534afc9dbfbfa87caaa77971b99f2fc86d3 | 310c282ebd25b915e612177c5c525be8c7c4737a | refs/heads/master | 2020-03-28T11:42:47.799467 | 2018-09-11T10:46:13 | 2018-09-11T10:46:13 | 148,240,673 | 0 | 0 | null | 2018-09-11T01:11:42 | 2018-09-11T01:11:41 | null | UTF-8 | Python | false | false | 3,733 | py | import tensorflow as tf
import random
def weight_variable(shape):
    """Create a trainable weight tensor initialised from a truncated normal
    (stddev 0.01), so no initial value is far from zero."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.01))
def bias_variable(shape):
    """Create a trainable bias tensor filled with a small positive constant
    (0.01), which keeps ReLU units active at the start of training."""
    return tf.Variable(tf.constant(0.01, shape=shape))
def conv2d(x, W):
    """2-D convolution with stride 1 and SAME padding (spatial size preserved)."""
    unit_stride = [1, 1, 1, 1]
    return tf.nn.conv2d(x, W, strides=unit_stride, padding='SAME')
def max_pool_2x2(x):
    """Downsample by 2x2 max pooling with stride 2 and SAME padding."""
    window = [1, 2, 2, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME')
with tf.device('/gpu:0'):
    # Inputs: 32x32 RGB images and one-hot labels over 8 classes.
    x = tf.placeholder(tf.float32, shape=[None, 32,32,3])
    y_ = tf.placeholder(tf.float32, shape=[None, 8])
    lr = tf.placeholder(tf.float32)
    # Conv block 1: 3x3x16 + 2x2 max-pool (32x32 -> 16x16).
    W_conv1 = weight_variable([3, 3, 3, 16])
    b_conv1 = bias_variable([16])
    h_conv1 = tf.nn.relu(conv2d(x, W_conv1) + b_conv1)
    h_pool1 = max_pool_2x2(h_conv1)
    # Conv blocks 2-4; a second pool brings 16x16 down to 8x8.
    W_conv2 = weight_variable([3, 3, 16, 32])
    b_conv2 = bias_variable([32])
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    W_conv3 = weight_variable([3, 3, 32, 128])
    b_conv3 = bias_variable([128])
    h_conv3 = tf.nn.relu(conv2d(h_conv2, W_conv3) + b_conv3)
    W_conv4 = weight_variable([3, 3, 128, 256])
    b_conv4 = bias_variable([256])
    h_conv4 = tf.nn.relu(conv2d(h_conv3, W_conv4) + b_conv4)
    h_pool4 = max_pool_2x2(h_conv4)
    # Two dropout-regularised fully-connected layers, then an 8-way softmax.
    W_fc1 = weight_variable([8 * 8 * 256, 4096])
    b_fc1 = bias_variable([4096])
    h_pool4_flat = tf.reshape(h_pool4, [-1, 8*8*256])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool4_flat, W_fc1) + b_fc1)
    keep_prob = tf.placeholder(tf.float32)
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
    W_fc2 = weight_variable([4096, 4096])
    b_fc2 = bias_variable([4096])
    h_fc2 = tf.nn.relu(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
    h_fc2_drop = tf.nn.dropout(h_fc2, keep_prob)
    W_fc3 = weight_variable([4096, 8])
    b_fc3 = bias_variable([8])
    y_conv=tf.nn.softmax(tf.matmul(h_fc2_drop, W_fc3) + b_fc3)
    # Clipping keeps log() away from 0 for numerical stability.
    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_*tf.log(tf.clip_by_value(y_conv,1e-10,1.0)), reduction_indices=[1]))
    train_step = tf.train.AdamOptimizer(lr).minimize(cross_entropy)
    correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
sess.run(tf.initialize_all_variables())  # NOTE(review): deprecated TF1 initializer
def train(X_train, Y_train, X_validation, Y_validation):
    """Run 10000 SGD steps (batch 32, dropout keep_prob 0.5, Adam lr 2e-4).

    Every 1000 steps prints batched train/validation accuracy; every 10
    steps prints the most recent loss. Python 2 code: print statements,
    xrange, and zip() returning a list that random.sample can draw from.
    """
    train_tuple = zip(X_train, Y_train)
    for i in range(10000):
        batch = random.sample(train_tuple, 32)
        batch_X = [j[0] for j in batch]
        batch_Y = [j[1] for j in batch]
        if i%1000==0:
            with sess.as_default():
                # Accuracy is accumulated batch by batch, weighted by batch size.
                va = 0
                for j in xrange(0, len(X_train), 32):
                    mx = min(j+32, len(X_train))
                    va = va + (accuracy.eval(feed_dict={x: X_train[j:mx], y_: Y_train[j:mx], keep_prob: 1.0}))*(mx-j)
                va /= len(X_train)
                print "train", va
                va = 0
                for j in xrange(0, len(X_validation), 32):
                    mx = min(j+32, len(X_validation))
                    va = va + (accuracy.eval(feed_dict={x: X_validation[j:mx], y_: Y_validation[j:mx], keep_prob: 1.0}))*(mx-j)
                va /= len(X_validation)
                print "validation", va
        if i%10 == 0 and i!=0:
            # loss_val is defined here because the first iteration (i == 0)
            # has already executed the training step below.
            print "step", i, "loss", loss_val
        _, loss_val = sess.run([train_step, cross_entropy], feed_dict={x:batch_X, y_: batch_Y, keep_prob: 0.5, lr: 2e-4})
def predict_probabilites(X):
    """Return softmax class probabilities for X, with dropout disabled."""
    (probs,) = sess.run([y_conv], feed_dict={x: X, keep_prob: 1.0})
    return probs
| [
"18829209540@163.com"
] | 18829209540@163.com |
c86a0834de57457f5f08b2298537849ecdd95456 | 4109ebd03cc7905f32afdb8ca55f95e049b4dc0f | /modules/swagger-codegen/src/main/resources/flaskConnexion/app.mustache | 14af64be6f9ac0b3286fb68a3ca233f06d280ad3 | [
"Apache-2.0"
] | permissive | hellofresh/swagger-codegen | 83616c88f4010e5045d4e424ad4873d8eef63479 | 5c636650ed2357af83ae8b3467d78d33e441007c | refs/heads/master | 2023-06-28T14:38:15.878350 | 2016-01-04T10:40:01 | 2016-01-04T10:40:01 | 29,979,606 | 0 | 2 | NOASSERTION | 2023-06-16T10:52:30 | 2015-01-28T17:37:37 | Java | UTF-8 | Python | false | false | 244 | mustache | #!/usr/bin/env python3
import connexion
if __name__ == '__main__':
    # {{serverPort}} and {{appDescription}} are mustache placeholders that
    # swagger-codegen substitutes when rendering this template.
    app = connexion.App(__name__, {{serverPort}},
                        specification_dir='./swagger/')
    app.add_api('swagger.yaml', arguments={'title': '{{appDescription}}'})
    app.run()
| [
"fehguy@gmail.com"
] | fehguy@gmail.com |
f65f5e898b5f6e18770d14905da24721aa97dd86 | 22595b26ff8a7a6d5da16478206b33008f4a4ee1 | /box_embeddings/modules/__init__.py | 0ee5a4f02c82bd552293809c9c44201f3cea63f2 | [] | no_license | amitgajbhiye/box-embeddings | cc7b12d69d18ae458346ea0eca399ba03f02515c | 874f1ee1302b93a8fb94e45a66707f5a6ea16628 | refs/heads/main | 2023-08-23T19:03:57.771408 | 2021-08-05T14:48:03 | 2021-08-05T14:48:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | import logging
from .. import torch_is_available, tensorflow_is_available
logger = logging.getLogger(__name__)
# Expose each backend-specific embedding class only when its framework can be
# imported; warn (but do not fail) when neither backend is present.
if torch_is_available():
    from .embedding import BoxEmbedding
if tensorflow_is_available():
    from .tf_embedding import TFBoxEmbedding
if not torch_is_available() and not tensorflow_is_available():
    logger.warning("Can't find versions of Pytorch or Tensorflow")
| [
"noreply@github.com"
] | amitgajbhiye.noreply@github.com |
3d2f7d795d4ea98bd2799ac398530b67442f232d | 652507478240d7397feaee3864d0b9050a5da936 | /FPP/save_data.py | f0f74db28563ba4d61e3e5731f4dd6268d8930d9 | [] | no_license | ffp123/FPP | ba91ac64ba560f2e897926c588a3ad0d3dbb7e27 | 1dd999c17b5b2926982777f58714e89146115706 | refs/heads/master | 2022-08-20T22:51:47.345754 | 2020-06-08T10:17:08 | 2020-06-08T10:17:08 | 252,998,291 | 0 | 0 | null | 2022-07-29T22:40:35 | 2020-04-04T13:02:10 | Python | UTF-8 | Python | false | false | 4,671 | py | # -*- coding: utf-8 -*-
"""
@Time : 2020/5/9 13:02
@Author : tmooming
@File : save_data.py
@Description : 将数据存储
"""
import datetime
import pandas as pd
import psycopg2
import redis
import pandas
# Try the non-standard (docker-mapped) ports first; fall back to the default
# local Redis/Postgres ports if that fails.
try:
    redis_db = redis.Redis(host='127.0.0.1', port=12787, db=1)  # local Redis, logical db 1 (not the default 0)
    postgres_connect = psycopg2.connect(host="127.0.0.1", port='12786', user="postgres", password="postgres",
                                        dbname="FPP")
except:  # NOTE(review): bare except also swallows KeyboardInterrupt -- consider narrowing
    redis_db = redis.Redis(host='127.0.0.1', port=6379, db=1)
    postgres_connect = psycopg2.connect(host="127.0.0.1", port='5432', user="postgres", password="postgres",
                                        dbname="FPP")
redis_data_dict = 'item_context'  # name of the Redis hash used as a dedup cache; fields are composite row keys
class BaiduIndexPipline(object):
    """Pipeline inserting Baidu-index items into Postgres, skipping duplicates.

    A Redis hash (redis_data_dict) mirrors the composite keys
    date_keyword_area_kind already stored in the baidu_index table, giving
    O(1) duplicate detection per item.
    """
    def __init__(self):
        self.connection = postgres_connect
        self.cur = self.connection.cursor()
        redis_db.flushdb()  # drop every key in the current Redis db so the cache can be rebuilt from Postgres
        # print(redis_db)
        if redis_db.hlen(redis_data_dict) == 0:  # rebuild the dedup cache from the existing table rows
            # sql = 'select context from zhparser.scrapy_items'  # (old query kept for reference)
            sql = 'select date,keyword,area,kind from baidu_index'
            df = pandas.read_sql(sql, self.connection)  # load existing rows
            df['area'] = df['area'].astype('str')
            df['date'] = df['date'].astype('str')
            df['data'] = df['date'].str.cat([df['keyword'], df['area'], df['kind']], sep='_')
            for value in df['data']:
                redis_db.hset(redis_data_dict, value, 0)
    def close_spider(self):
        # Release the cursor and the shared connection when the spider stops.
        self.cur.close()
        self.connection.close()
    def process_item(self, item):
        # Compare the item's composite key against the Redis hash fields.
        if redis_db.hexists(redis_data_dict, '_'.join(
                [item['date'], item['keyword'], str(item['area']), item['kind']])):
            print('已存在该数据')
        else:
            self.do_insert(item)
    def do_insert(self, item):
        """Insert one item; insert errors are printed but not re-raised."""
        flag = True
        if flag:
            try:
                self.cur.execute(
                    "INSERT INTO baidu_index(date, keyword, area, kind,time_type, all_index, pc_index, wise_index) VALUES(%s,%s,%s,%s,%s,%s,%s,%s); ",
                    (item['date'],item['keyword'],item['area'],item['kind'],item['time_type'],item['all_index'],item['pc_index'],item['wise_index']))
            except Exception as e:
                print("错误", e)
            self.connection.commit()
        # else:
        #     print('测试')
class GoogleTrends(object):
    """Pipeline inserting Google-Trends items into Postgres, skipping duplicates.

    Same Redis-hash dedup scheme as BaiduIndexPipline, keyed on
    date_keyword_cat_gprop_geo.
    """
    def __init__(self):
        self.connection = postgres_connect
        self.cur = self.connection.cursor()
        redis_db.flushdb()  # drop every key in the current Redis db so the cache can be rebuilt from Postgres
        # print(redis_db)
        if redis_db.hlen(redis_data_dict) == 0:  # rebuild the dedup cache from the existing table rows
            # sql = 'select context from zhparser.scrapy_items'  # (old query kept for reference)
            sql = 'select date,keyword,cat,gprop,geo from google_trends'
            df = pandas.read_sql(sql, self.connection)  # load existing rows
            df['date'] = df['date'].astype('str')
            df['data'] = df['date'].str.cat([df['keyword'], df['cat'], df['gprop'],df['geo']], sep='_')
            for value in df['data']:
                redis_db.hset(redis_data_dict, value, 0)
    def close_spider(self):
        # Release the cursor and the shared connection when the spider stops.
        self.cur.close()
        self.connection.close()
    def process_item(self, item):
        # Compare the item's composite key against the Redis hash fields.
        if redis_db.hexists(redis_data_dict, '_'.join(
                [item['date'], item['keyword'], item['cat'], item['gprop'],item['geo']])):
            print('已存在该数据')
        else:
            self.do_insert(item)
    def do_insert(self, item):
        """Insert one item; the index value is stored under item[item['keyword']]."""
        flag = True
        if flag:
            try:
                self.cur.execute(
                    "INSERT INTO google_trends(date, keyword, cat, gprop,geo, google_index) VALUES(%s,%s,%s,%s,%s,%s); ",
                    (item['date'],item['keyword'],item['cat'],item['gprop'],item['geo'],item[item['keyword']]))
            except Exception as e:
                print("错误", e)
            self.connection.commit()
"finance_ffp@163.com"
] | finance_ffp@163.com |
711b58c83bfeff7ae943e7c0d498eecb18087314 | 028b28bda2eb04b1043f54b8251b11933f5616a8 | /parse_file.py | 85bf6af929c28f84832d03dec30715fb38eb648a | [] | no_license | zrongh90/system_monitor | e47225a4661354920b72cec62e1915e8d97cb559 | 416d6914dfb60da9325b535f648899978a2bbe3e | refs/heads/master | 2021-08-23T06:49:43.128068 | 2017-12-04T00:48:58 | 2017-12-04T00:48:58 | 105,982,318 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,634 | py | import xlrd
from modules import System, WebSphere, DB2, db
def parse_file(in_file_path):
    """Parse the first worksheet of an .xls host inventory into System objects.

    Row 0 is assumed to be a header. Columns 0, 1, 2, 5, 6, 7 map to
    inventory, hostname, os_info, platform, cpu_num and memory; columns 8
    and 9 are read but unused (matching the original sheet layout).
    """
    workbook = xlrd.open_workbook(in_file_path)
    sheet = workbook.sheet_by_index(0)
    systems = []
    for row_idx in range(1, sheet.nrows):
        cells = [sheet.cell_value(row_idx, col) for col in [0, 1, 2, 5, 6, 7, 8, 9]]
        systems.append(System(inventory=cells[0], hostname=cells[1],
                              os_info=cells[2], platform=cells[3],
                              cpu_num=cells[4], memory=cells[5]))
    return systems
def main_parse():
    """Parse the default host-info spreadsheet shipped next to this script."""
    return parse_file(u'hostinfo_rhel_1709141108.xls')
if __name__ == '__main__':
    # Full refresh of the System table: parse the spreadsheet, delete every
    # existing row, then insert the freshly parsed ones in a single commit.
    system_list = main_parse()
    #from db_utils import DBSession
    #one_was = WebSphere(max_mem=2048, curr_mem=1024, prf_name="profile2", srv_name="server2", sys_inventory="10.8.5.34")
    #two_was = WebSphere(max_mem=2048, curr_mem=1024, prf_name="profile1", srv_name="server1", sys_inventory="192.168.2.69")
    #three_was = WebSphere(max_mem=2048, curr_mem=1014, prf_name="profile3", srv_name="server3", sys_inventory="10.8.5.34")
    #one_db2 = DB2(sys_inventory="11.8.8.220", inst_name="test_inst", db_name="test_db", listen_port=50002)
    #session = DBSession()
    #db.session.add(three_was)
    #db.session.add(one_db2)
    system_list_in = db.session.query(System).all()
    for system in system_list_in:
        db.session.delete(system)
    db.session.add_all(system_list)
    #db.session.add(one_was)
    #db.session.add(two_was)
    #systems = session.query(System).all()
    #print(systems)
    #was = session.query(WebSphere).all()
    #print(was)
    db.session.commit()
    db.session.close()
| [
"397864223@qq.com"
] | 397864223@qq.com |
0636762bbad9464f94b94406db6f5d6e66ed737a | 0ecc787420fa4453f9150c010fbdfdd02709aa5f | /logs_analysis.py | 5b45ed2f4d231d4f2be63a5db55070ffb7977797 | [] | no_license | udayanshevade/FSND-Logs-Analysis | e8b86b057e3569317eddfb39ae4759bedb34c9d8 | 9ce2e9c3adcdd94fdb32d9e364b4da96ab97a61b | refs/heads/master | 2021-01-20T12:45:37.468443 | 2017-08-29T07:08:09 | 2017-08-29T07:08:09 | 101,719,664 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,183 | py | #!/usr/bin/python3
import psycopg2
DB_NAME = "news"  # name of the PostgreSQL database every query connects to
def connect_to_database(dbname):
    """Connect to the PostgreSQL database `dbname`.

    Returns a (connection, cursor) pair; exits the process with status 1
    if the connection cannot be established.
    """
    import sys  # bug fix: module scope never imported sys, so the old
    # failure path raised NameError instead of exiting cleanly
    try:
        db = psycopg2.connect(database=dbname)
        c = db.cursor()
        return db, c
    except psycopg2.Error:
        # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit
        # still propagate.
        print("Unable to connect to database...")
        sys.exit(1)
def get_query_results(query_string):
    """Execute query_string against the news database and return all rows."""
    connection, cursor = connect_to_database(DB_NAME)
    cursor.execute(query_string)
    rows = cursor.fetchall()
    connection.close()
    return rows
def create_articles_view():
    """Create (or replace) the article_views view.

    The view counts log visits per article -- a visit matches when the
    article's slug appears inside the requested path -- ordered by count.
    """
    db, c = connect_to_database(DB_NAME)
    articles_query_string = (
        "CREATE OR REPLACE VIEW article_views AS "
        "SELECT articles.author, articles.title, count(log.path) AS visited "
        "FROM articles LEFT JOIN log "
        "ON position(articles.slug in log.path) > 0 "
        "GROUP BY articles.author, articles.title "
        "ORDER BY visited DESC;"
    )
    c.execute(articles_query_string)
    # Bug fix: psycopg2 opens a transaction by default, so without an
    # explicit commit the CREATE VIEW was rolled back when the connection
    # closed.
    db.commit()
    db.close()
def get_top_articles():
    """Return the three most-visited articles as (quoted title, "<n> views") rows."""
    sql = (
        "SELECT '\"' || title || '\"' as title, visited || ' views' "
        "FROM article_views LIMIT 3;"
    )
    return get_query_results(sql)
def get_top_authors():
    """Return authors ordered by total article views, as (name, "<n> views") rows."""
    # Aggregate expression reused in both the SELECT list and the ORDER BY.
    total_views = "sum(article_views.visited)"
    sql = (
        "SELECT authors.name, {0} || ' views' AS views "
        "FROM authors, article_views "
        "WHERE authors.id = article_views.author "
        "GROUP BY authors.name "
        "ORDER BY {0} DESC;"
    ).format(total_views)
    return get_query_results(sql)
def get_days_with_errors():
    """Return dates where more than 1% of requests returned a non-200 status."""
    # Percentage of log rows whose status does not contain '200', rounded
    # to two decimals; reused in the SELECT list and the HAVING clause.
    err_count = (
        "round((count(position('200' in status) = 0 OR null) / "
        "count(*)::float * 100)::numeric, 2)"
    )
    sql = (
        "SELECT to_char(time, 'fmMonth DD, YYYY') AS date, "
        "{0} || '% errors' AS err_count "
        "FROM log "
        "GROUP BY date "
        "HAVING {0} > 1;"
    ).format(err_count)
    return get_query_results(sql)
def write_results_to_file():
    """Write the answers to all three report questions into results.txt."""
    top_articles = get_top_articles()
    top_authors = get_top_authors()
    days_with_errors = get_days_with_errors()
    questions = [
        "What are the most popular three articles of all time?",
        "Who are the most popular authors of all time?",
        "On which days did more than 1% of the requests lead to errors?"
    ]
    results = [top_articles, top_authors, days_with_errors]
    with open("results.txt", "w") as f:
        for number, (question, rows) in enumerate(zip(questions, results), 1):
            f.write("{}) {}\n\n".format(number, question))
            for row in rows:
                f.write("{} -- {}\n".format(row[0], row[1]))
            # Blank separator line between sections, but not after the last.
            if number < 3:
                f.write("\n")
# Bug fix: the guard previously compared __name__ against "main", so the
# report was never generated when the script was executed directly; the
# correct sentinel is "__main__".
if __name__ == "__main__":
    create_articles_view()
    write_results_to_file()
| [
"udayan.shevade@gmail.com"
] | udayan.shevade@gmail.com |
bbc18d749c70f92caf44d513ddf6e2153608d0fa | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /Gauss_v45r10p1/Gen/DecFiles/options/14545402.py | c50679fcce12693bd16822034a843c5c2647f62b | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,031 | py | # file /home/hep/ss4314/cmtuser/Gauss_v45r10p1/Gen/DecFiles/options/14545402.py generated: Wed, 25 Jan 2017 15:25:31
#
# Event Type: 14545402
#
# ASCII decay Descriptor: [B_c+ -> (JPsi -> mu+ mu-) (tau+ -> pi+ pi+ pi- pi0 anti-nu_tau) nu_tau]cc
#
from Configurables import Generation
# Event type 14545402 (see the descriptor in the header above):
# [B_c+ -> (JPsi -> mu+ mu-) (tau+ -> pi+ pi+ pi- pi0 anti-nu_tau) nu_tau]cc
Generation().EventType = 14545402
Generation().SampleGenerationTool = "Special"
from Configurables import Special
Generation().addTool( Special )
# Bc production via BcVegPy; pile-up fixed-luminosity mode for rare processes.
Generation().Special.ProductionTool = "BcVegPyProduction"
Generation().PileUpTool = "FixedLuminosityForRareProcess"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bc_JpsiTauNu,pipipipi0nu=DecProdCut,ffKiselev.dec"
# Generator-level cut: all Bc daughters inside the LHCb acceptance; the
# neutral-particle theta window is opened to the full range.
Generation().Special.CutTool = "BcDaughtersInLHCb"
from Configurables import BcDaughtersInLHCb
Generation().Special.addTool( BcDaughtersInLHCb )
Generation().Special.BcDaughtersInLHCb.NeutralThetaMin = 0.0
Generation().Special.BcDaughtersInLHCb.NeutralThetaMax = 3.14
| [
"slavomirastefkova@b2pcx39016.desy.de"
] | slavomirastefkova@b2pcx39016.desy.de |
0aa9c77c68e15f0666262f6cd256e1faf9890c3c | 8ab93bfc2e095d0ba83138e5898973eca992c200 | /CULane/lower_checkpoint.py | d1b9a60f6a7e2d7d4473b28cc7ff25539108dc80 | [] | no_license | Muen342/HAWP_road_predictions | d6edd5088a44b8ad5678157128fdf05dea35c6d8 | c7ba07bb1445be772ead6ba9d3e878cf1d28e382 | refs/heads/main | 2023-04-17T07:48:46.120008 | 2021-04-27T17:59:57 | 2021-04-27T17:59:57 | 357,369,428 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py | f = open("outputs/hawp/last_checkpoint", "r")
# Extract the two checkpoint digits from the last_checkpoint path (opened
# as `f` on the previous line), step the number back by one, and rewrite
# the file to point at the previous model.
num = f.read()[-6:-4]
f.close()  # bug fix: the read handle was previously never closed
print(num)
num = int(num) - 1
# Bug fix: zero-pad to five digits in one step. The old if/else produced a
# 4-digit name ("model_0000.pth") when num == 0 and broke for num >= 100.
final = "outputs/hawp/model_{:05d}.pth".format(num)
print(final)
with open("outputs/hawp/last_checkpoint", "w") as f:
    f.write(final)
"muen2001@hotmail.com"
] | muen2001@hotmail.com |
8f209c4f0a51bf7e93939459c66c36c05ae18754 | 0af30c2e3ddcc80a19ea9cfaad9d7e1fedf8b876 | /210304-210305/백) 2252 줄세우기/배진수.py | 2c14a3319c10d3c3eaae3e6497ecb1e101636b44 | [] | no_license | winterash2/algorithm_study_2021_1 | d1cd6077f71f68e7fc3eb6dfae7b2cc220885e4c | c1fee62c7e5e560c3bf7ae5e6166866d0147f23f | refs/heads/master | 2023-04-02T20:11:04.169856 | 2021-04-05T11:18:22 | 2021-04-05T11:18:22 | 327,563,535 | 1 | 2 | null | 2021-01-24T14:17:40 | 2021-01-07T09:28:08 | Python | UTF-8 | Python | false | false | 550 | py | from collections import deque
import sys
input = sys.stdin.readline  # fast stdin reads for large inputs
# Kahn's algorithm (BOJ 2252): n people, m "a must stand before b" constraints.
n, m = map(int, input().split())
indegree = [0] * (n+1)
graph = [[] for _ in range(n+1)]
for i in range(m):
    a, b = map(int, input().split())
    graph[a].append(b)
    indegree[b] += 1
# Seed the queue with every node that has no incoming constraint.
q = deque()
for i in range(1, n+1):
    if indegree[i] == 0:
        q.append(i)
result = []
while q:
    x = q.popleft()
    result.append(x)
    # Removing x may free its successors to be placed next.
    for i in graph[x]:
        indegree[i] -= 1
        if indegree[i] == 0:
            q.append(i)
for res in result:
    print(res, end=" ")
| [
"jinsoo941010@naver.com"
] | jinsoo941010@naver.com |
10ca0fb1879fdcb9c6eb661bd58dc9d9b94b4e5a | c4634683e864e2e67f53762b9ff322985603764d | /web/accounts/models.py | 26a5591c4f35e6a31065e18564b3e5cd6a2e72db | [] | no_license | FoodDeepLearningInfosec/foodproject | 2ee119a348db4f1c884e384dd4ecb898c10674e7 | 1e1e33953c32825db1d89a741b36bd3197029cd4 | refs/heads/master | 2023-04-11T02:37:23.435434 | 2021-04-13T00:24:46 | 2021-04-13T00:24:46 | 353,586,514 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 267 | py | from django.db import models
from django.contrib.auth.models import AbstractUser
from foods.models import Ingredient
# Create your models here.
class User(AbstractUser):
    """Custom auth user extending Django's AbstractUser with food preferences."""
    # Ingredients this user dislikes; the reverse accessor on Ingredient is
    # "hatepeople". blank=True keeps the field optional in forms/admin.
    hateingredient = models.ManyToManyField(Ingredient, related_name='hatepeople', blank=True)
| [
"woodg1207@gmail.com"
] | woodg1207@gmail.com |
828c1cb256c150538b800671fce4719f3a98423e | 896353d76f5c068b3770d6ebda35d3e63a1a08c4 | /tests/integration/models/item_test.py | 525230971f7f79eaea7747e024f53d128aeca241 | [] | no_license | nchatzGitHub/stores-rest-api-test | 6f3e7732ed4187b784979394864cf4b4ba0f3022 | 0654cee4613aaa4161bb8c90d37ce692d8e82f55 | refs/heads/master | 2022-07-11T09:47:25.424419 | 2020-05-15T12:38:41 | 2020-05-15T12:38:41 | 264,169,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,221 | py | from models.item import ItemModel
from tests.base_test import BaseTest
from models.store import StoreModel
class ItemTest(BaseTest):
    """Integration tests: ItemModel CRUD and its relationship to StoreModel."""
    def test_crud(self):
        with self.app_context():
            # A Store is created first (it receives id=1) because the item
            # below is constructed with store_id=1.
            StoreModel(
                'test').save_to_db()
            item = ItemModel('test', 19.99, 1)
            # Not saved yet, so the lookup must come back empty.
            self.assertIsNone(ItemModel.find_by_name('test'),
                              "Found an item with name {}, but expected not to.".format(item.name))
            item.save_to_db()
            self.assertIsNotNone(ItemModel.find_by_name('test'))
            item.delete_from_db()
            self.assertIsNone(ItemModel.find_by_name('test'))
    def test_store_relationship(self):
        with self.app_context():
            # Every test store ends up with id == 1 because BaseTest's
            # tearDown drops the database after each test.
            store = StoreModel('test_store')
            item = ItemModel('test', 19.99, 1)
            item.save_to_db()
            store.save_to_db()
            self.assertEqual(item.store_id, store.id)
            self.assertEqual(item.store.name, 'test_store')
| [
"nchatzistamatis@gmail.com"
] | nchatzistamatis@gmail.com |
4795049c27f562e954254d66603b2ca95f05a3a7 | 988eb07ab1567b8b0a65d5a56e5ef83902f6a2f7 | /test_kfold.py | 6aed7acebb0197bb357b681a75816f40741fa7dc | [] | no_license | tmacccccc/Machine-learning-Hw | b30df8cb153603a8394f50a70d3430f2f8335607 | 5e9e1c687f88e63041d4a1ad586315ac78bdb458 | refs/heads/main | 2023-02-04T00:22:34.644844 | 2020-12-22T04:15:04 | 2020-12-22T04:15:04 | 321,515,985 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,750 | py | import numpy as np
from multiprocessing import Pool, Manager
import pickle
from time import time
np.seterr(divide='ignore', invalid='ignore')  # EM responsibilities can hit 0/0; silence those numpy warnings
class GMM_MLE(object):
    """EM (maximum-likelihood) fitting of a Gaussian mixture model.

    Parameter caches are (means, covariances, weights) triples; expectation
    caches are the responsibility matrix wik of shape [n, k].
    """
    def __init__(self, m0s, c0s, weights):
        # Initial parameters: list of k means, list of k covariances, weights.
        self.init_cache = (m0s, c0s, weights)
    @classmethod
    def get_gaussian(cls, data, mu, c):
        """Multivariate normal density of each row of data; returns shape [n,]."""
        n = mu.shape[0]
        inv_c = np.linalg.inv(c)
        constant = 1 / ((2*np.pi)**(n/2) * np.linalg.det(c) ** (1/2))
        part1 = np.einsum('nx, xy -> ny', data - mu.T, inv_c)
        power = np.einsum('ny, ny -> n', part1, data - mu.T)
        return constant * np.exp(-0.5 * power)
    @classmethod
    def get_log_gaussian(cls, data, mu, c):
        """Log of get_gaussian, computed without exponentiating the quadratic."""
        n = mu.shape[0]
        inv_c = np.linalg.inv(c)
        constant = 1 / ((2*np.pi)**(n/2) * np.linalg.det(c) ** (1/2))
        part1 = np.einsum('nx, xy -> ny', data - mu.T, inv_c)
        power = np.einsum('ny, ny -> n', part1, data - mu.T)
        return np.log(constant) - 0.5 * power
    @classmethod
    def calc_expectation_cache(cls, data, current_cache):
        """E-step: responsibility of each component k for each sample i."""
        m0s, c0s, weights = current_cache
        # Li0: shape [n,]
        gmm_Ls = [w * cls.get_gaussian(data, m, c) for w,m,c in zip(weights, m0s, c0s)]
        Li0 = np.sum(gmm_Ls, axis=0)
        # wik: shape [n,k]
        wik = (gmm_Ls / Li0).T
        return wik
    @classmethod
    def update_c(cls, data, m, weights):
        """Responsibility-weighted covariance of data around mean m."""
        part1 = np.einsum('nx,ny->nxy', data - m, data - m)
        return np.einsum('n, nxy->xy', weights, part1) / weights.sum()
    @classmethod
    def update(cls, data, current_cache, exp_cache):
        """M-step: re-estimate means, covariances and mixture weights."""
        m0s, c0s, weights = current_cache
        wik = exp_cache
        pk = wik.sum(axis=0) / wik.shape[0]
        m_weight = wik.T # [k,n]
        m0s_new = np.einsum('nk,nd -> kd', wik, data) / np.sum(wik,axis=0)[...,np.newaxis] #[k,d]
        c0s_new = [cls.update_c(data, mk, wk) for mk,wk in zip(m0s_new, m_weight)]
        return m0s_new, c0s_new, pk
    @classmethod
    def calc_Q(cls, data, current_cache, exp_cache):
        """Expected complete-data log-likelihood (EM's Q function, up to constants)."""
        m0s, c0s, weights = current_cache
        wik = exp_cache
        #part_temp = [cls.get_gaussian(data, mk, ck) * wk
        #             for mk,ck,wk in zip(m0s,c0s,weights)]
        #print(part_temp)
        log_part = [cls.get_log_gaussian(data, mk, ck) * wk
                    for mk,ck,wk in zip(m0s,c0s,weights)]
        m_weight = wik.T #[k,n]
        part_0 = (log_part * m_weight).sum()
        return part_0
    def __call__(self, data, criteria=0.25, max_iter=1000):
        """Iterate EM until |Q - previous Q| < criteria or max_iter is hit.

        Returns (Q history list, final (means, covariances, weights)).
        """
        Q_list = list()
        init_exp_cache = self.calc_expectation_cache(data, self.init_cache)
        current_cache = self.update(data, self.init_cache, init_exp_cache)
        init_Q = self.calc_Q(data, self.init_cache, init_exp_cache)
        Q_list.append(init_Q)
        i = 0
        while True:
            exp_cache = self.calc_expectation_cache(data, current_cache)
            new_cache = self.update(data, current_cache, exp_cache)
            Q = self.calc_Q(data, new_cache, exp_cache)
            Q_list.append(Q)
            current_cache = new_cache
            i += 1
            if np.abs(Q - Q_list[-2]) < criteria:
                break
            if i > max_iter:
                break
        return Q_list, new_cache
class GMM_EM_M(GMM_MLE):
    """GMM with m components and randomised initial parameters.

    Calling an instance scores held-out data under a given parameter cache
    (BIC by default, plain log-likelihood with bic=False).
    """
    def __init__(self, m):
        def rdm():
            # Random jitter factor in [0.85, 1.10) to perturb the seeds.
            c = np.random.rand(1)[0] * 0.25 + 0.85
            # print(c)
            return c
        # Component means are spread along a diagonal line and jittered;
        # covariances are scaled identity matrices; weights are uniform.
        mus = [np.array([3*x*rdm() ,5*y*rdm() ])
               for x,y in zip(np.arange(-4,4,8/m), np.arange(-4,4,8/m))]
        sigmas = [np.eye(2) * r * rdm() for r in np.arange(1,m+1)/4]
        weights = [1/m] * m
        super(GMM_EM_M, self).__init__(mus, sigmas, weights)
    def train(self, data, criteria=0.25, max_iter=1000):
        """Fit by EM; returns (Q history, fitted (means, covariances, weights))."""
        return super(GMM_EM_M, self).__call__(data, criteria=criteria, max_iter=max_iter)
    def __call__(self, data, current_cache, bic=True):
        """Score data under current_cache: BIC if bic is True, else log-likelihood."""
        m0s, c0s, weights = current_cache
        # Li0: shape [n,]
        gmm_Ls = [w * self.get_gaussian(data, m, c) for w,m,c in zip(weights, m0s, c0s)]
        Li0 = np.sum(gmm_Ls, axis=0)
        likelihood = np.log(Li0).sum()
        if bic:
            # k = free parameters per component (mean + covariance + weight),
            # times the number of components.
            k = np.prod(m0s[0].shape) + np.prod(c0s[0].shape) + 1
            k *= len(m0s)
            result = -2 * likelihood + k * np.log(data.shape[0])
        else:
            result = likelihood
        return result
def get_gaussian_MLE_parameters(data):
    """Closed-form Gaussian MLE: sample mean and (biased, 1/n) covariance.

    data: (n, d) array of observations.
    Returns (mean, covariance) with shapes (d,) and (d, d).
    """
    n = data.shape[0]
    m_new = np.mean(data, axis = 0)
    centered = data - m_new  # [n, d]
    # Bug fix: the scatter matrix must be normalised by n to be the MLE
    # covariance; previously the raw sum of outer products was returned
    # (and n was computed but never used).
    c_new = np.einsum('nx,yn -> xy', centered, centered.T) / n
    return m_new, c_new
def generate_gmm(mus: list, sigmas: list, weights: list,
                 sample_size = 1000, seed=233):
    """Generate sample_size draws from a multivariate Gaussian mixture.

    mus/sigmas/weights describe the components; draws are reproducible for
    a given seed. Returns an array of shape (sample_size, d).
    """
    rs = np.random.RandomState(seed)
    assert len(mus) == len(sigmas)
    assert len(sigmas) == len(weights)
    components = np.stack([rs.multivariate_normal(mu, sigma, sample_size)
                           for mu, sigma in zip(mus, sigmas)])
    # Bug fix: the component labels previously came from the *global*
    # np.random state, so the seed argument did not make the draws
    # reproducible. Use the seeded RandomState throughout.
    comp_label = rs.multinomial(1, weights, sample_size)
    # Select, per sample, the draw from its labelled component.
    return np.einsum('cnd,nc->nd', components, comp_label)
def one_epoch_k_fold_test(train, val, value_dict, iter_idx):
    """Fit GMMs with m = 1..20 components on train; score log-likelihood on val.

    The 20 scores (np.nan where EM failed, e.g. on a singular covariance)
    are stored in value_dict[iter_idx], so this can run inside a
    multiprocessing Pool with a Manager dict.
    """
    likelihood_list = []
    em = GMM_EM_M(1)
    # m = 1 has a closed-form MLE solution; no EM iteration is needed.
    m, c = get_gaussian_MLE_parameters(train)
    likelihood_list.append(em(val, [[m],[c],[1]], bic=False))
    # m = 2..20: run EM, falling back to NaN on numerical failure.
    for m in range(2,21):
        try:
            em = GMM_EM_M(m)
            a,b = em.train(train)
            likelihood_list.append(em(val, b, bic=False))
        except Exception:
            # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit
            # still propagate out of worker processes.
            likelihood_list.append(np.nan)
    value_dict[iter_idx] = likelihood_list
def k_fold(dataset, k):
    """Yield (train, validation) splits for k-fold cross validation.

    dataset must support fancy indexing (e.g. a numpy array). Each fold's
    validation slice holds len(dataset) // k consecutive elements; any
    remainder at the end is only ever used for training.
    """
    data_size = len(dataset)
    step = data_size // k
    index = np.arange(k) * step
    for i in range(k):
        val_index = np.arange(index[i], index[i] + step)
        # Fix: sorted() keeps training examples in their original order;
        # iterating a raw set yields an arbitrary, hash-dependent order.
        train_index = sorted(set(range(data_size)).difference(val_index))
        yield dataset[train_index], dataset[val_index]
if __name__ == '__main__':
    # initialize mus and sigmas: 16 component means along a diagonal line,
    # identity covariances scaled by r/4 (shuffled), uniform weights.
    Mus_list = [np.array([3*x,5*y])
                for x,y in zip(np.arange(-4,4,0.5), np.arange(-4,4,0.5))]
    Sigma_list = [np.eye(2) * r for r in np.arange(1,len(Mus_list)+1)/4]
    np.random.shuffle(Sigma_list)
    weights_list = np.array([1/len(Mus_list)]*len(Mus_list))
    p = Pool(6)
    # NOTE(review): file_name is never used below.
    file_name = {100: '100', 1000: '1000', 1e4: '1e4', 1e5: '1e5', 1e6: '1e6'}
    for sample_size in [100, 1000, 1e4, 1e5]:
        dataset_result = dict()
        b = time()
        print('Size {} begin training ...... '.format(int(sample_size)), end='')
        # 20 repetitions of 12-fold cross-validation; the 12 folds are
        # scored in parallel via the process pool + a Manager dict.
        for iters in range(20):
            data = generate_gmm(Mus_list, Sigma_list, weights_list,
                                sample_size = int(sample_size),
                                seed = 100)
            d = Manager().dict()
            tasks = [(train, val, d, i) for i, (train, val) in enumerate(k_fold(data, 12))]
            p.starmap(one_epoch_k_fold_test, tasks)
            dataset_result[iters] = [v for _,v in d.items()]
        e = time()
        print('DONE! ({} mins)'.format(round((e - b)/60, 2)))
        # ("stroing" is a typo in this status string; left as-is since it is
        # runtime output.)
        print('Size {} begin stroing ...... '.format(int(sample_size)), end='')
        with open('test_result/kfold_{}_result.pkl'.format(int(sample_size)),'wb') as f:
            pickle.dump(dataset_result, f)
        print('DONE!')
    p.terminate()
    p.join()
| [
"noreply@github.com"
] | tmacccccc.noreply@github.com |
9866bcb709da8eb7a32c8435b3d540b7ed95699a | 32f0b173c93f1743d106eedeee79f2cb8d162676 | /mdtcollections/_version.py | 6be271f611b1afccdab2a3c635dd873f598ab8a5 | [
"Apache-2.0"
] | permissive | avirshup/mdtcollections | dee5d4ed2effb26b1dbe741d9c1a23df97af2797 | ae3ca1187a5738fd95e49d772be30b53b1eb09a3 | refs/heads/master | 2020-03-11T10:48:46.829746 | 2018-04-18T20:58:26 | 2018-04-18T20:58:26 | 129,952,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,455 | py |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Get the keywords needed to look up the version information."""
    # These strings are substituted by git during 'git archive'.
    # setup.py/versioneer.py greps for these exact variable names, so each
    # assignment must stay on its own line; _version.py just calls this.
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    return {"refnames": git_refnames, "full": git_full, "date": git_date}
class VersioneerConfig:
    """Container for Versioneer configuration parameters."""


def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # These values were filled in when 'setup.py versioneer' created
    # _version.py.
    cfg = VersioneerConfig()
    settings = {
        "VCS": "git",
        "style": "pep440",
        "tag_prefix": "",
        "parentdir_prefix": "None",
        "versionfile_source": "mdtcollections/_version.py",
        "verbose": False,
    }
    for attr, value in settings.items():
        setattr(cfg, attr, value)
    return cfg
class NotThisMethod(Exception):
    """Raised when a particular version-extraction strategy does not apply.

    Callers try the strategies in turn (expanded keywords, 'git describe',
    parent-directory name) and fall through to the next one on this error.
    """
# Registry of VCS handler functions, filled in by @register_vcs_handler:
# HANDLERS[vcs][method] -> implementation.
LONG_VERSION_PY = {}
HANDLERS = {}


def register_vcs_handler(vcs, method):  # decorator
    """Create a decorator that registers its target in HANDLERS[vcs][method]."""
    def decorate(f):
        """Store f in HANDLERS[vcs][method] and return it unchanged."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Call the given command(s).

    Each candidate executable name in `commands` is tried in order with
    `args` until one can be launched. Returns a (stdout, returncode) pair;
    (None, None) if no candidate could be launched at all, and
    (None, returncode) if the process ran but exited non-zero.
    """
    assert isinstance(commands, list)
    p = None
    for c in commands:
        try:
            dispcmd = str([c] + args)
            # remember shell=False, so use git.cmd on windows, not just git
            p = subprocess.Popen([c] + args, cwd=cwd, env=env,
                                 stdout=subprocess.PIPE,
                                 stderr=(subprocess.PIPE if hide_stderr
                                         else None))
            break
        except EnvironmentError:
            e = sys.exc_info()[1]
            if e.errno == errno.ENOENT:
                # Executable not found: try the next candidate name.
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
    else:
        # for-loop exhausted without a successful Popen.
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = p.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if p.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, p.returncode
    return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes
    both the project name and a version string. Up to two additional
    directory levels above `root` are searched for an appropriately named
    parent directory. Raises NotThisMethod when nothing matches.
    """
    tried = []
    for _ in range(3):
        dirname = os.path.basename(root)
        if dirname.startswith(parentdir_prefix):
            return {"version": dirname[len(parentdir_prefix):],
                    "full-revisionid": None,
                    "dirty": False, "error": None, "date": None}
        tried.append(root)
        root = os.path.dirname(root)  # walk one level up
    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file.

    Greps `versionfile_abs` for the git_refnames/git_full/git_date
    assignments and returns whichever were found as a dict (possibly empty).
    """
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    try:
        f = open(versionfile_abs, "r")
        for line in f.readlines():
            if line.strip().startswith("git_refnames ="):
                mo = re.search(r'=\s*"(.*)"', line)
                if mo:
                    keywords["refnames"] = mo.group(1)
            if line.strip().startswith("git_full ="):
                mo = re.search(r'=\s*"(.*)"', line)
                if mo:
                    keywords["full"] = mo.group(1)
            if line.strip().startswith("git_date ="):
                mo = re.search(r'=\s*"(.*)"', line)
                if mo:
                    keywords["date"] = mo.group(1)
        f.close()
    except EnvironmentError:
        # Missing/unreadable file: fall through with whatever was collected.
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords.

    `keywords` is the dict produced by get_keywords()/git_get_keywords();
    raises NotThisMethod when the keywords are absent or unexpanded
    (i.e. not a git-archive tarball).
    """
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
        # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
        # -like" string, which we must then edit to make compliant), because
        # it's been around since git-1.5.3, and it's too difficult to
        # discover which version we're using, or to work around using an
        # older one.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = set([r.strip() for r in refnames.strip("()").split(",")])
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = set([r for r in refs if re.search(r'\d', r)])
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % r)
            return {"version": r,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False, "error": None,
                    "date": date}
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns the `pieces` dict (long/short/closest-tag/distance/dirty/date/
    error) consumed by the render_* helpers; raises NotThisMethod when the
    directory is not under git control or git cannot be run.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
                          hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
                                          "--always", "--long",
                                          "--match", "%s*" % tag_prefix],
                                   cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                    cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
                       cwd=root)[0].strip()
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces
def plus_or_dot(pieces):
    """Return a + if we don't already have one, else return a ."""
    return "." if "+" in pieces.get("closest-tag", "") else "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Goal: TAG[+DISTANCE.gHEX[.dirty]]. Note that a tagged build that is then
    dirtied renders as TAG+0.gHEX.dirty. With no tag at all, the result is
    0+untagged.DISTANCE.gHEX[.dirty].
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        if pieces["distance"] or pieces["dirty"]:
            version += plus_or_dot(pieces)
            version += "%d.g%s" % (pieces["distance"], pieces["short"])
            if pieces["dirty"]:
                version += ".dirty"
    else:
        # No tag anywhere in history.
        version = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
    return version
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- never marks dirty.

    With no tag at all: 0.post.devDISTANCE
    """
    tag = pieces["closest-tag"]
    if not tag:
        return "0.post.dev%d" % pieces["distance"]
    if pieces["distance"]:
        return tag + ".post.dev%d" % pieces["distance"]
    return tag
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" suffix means dirty. Note that .dev0 sorts backwards (a dirty
    tree appears "older" than the corresponding clean one), but you
    shouldn't be releasing software with -dirty anyway.

    With no tag at all: 0.postDISTANCE[.dev0]+gHEX
    """
    tag = pieces["closest-tag"]
    if tag:
        out = tag
        if pieces["distance"] or pieces["dirty"]:
            out += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                out += ".dev0"
            out += plus_or_dot(pieces)
            out += "g%s" % pieces["short"]
    else:
        out = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            out += ".dev0"
        out += "+g%s" % pieces["short"]
    return out
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] — the ".dev0" suffix means dirty.

    With no tag at all: 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if not tag:
        version = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        return version
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
    return version
def render_git_describe(pieces):
    """Render like ``git describe --tags --dirty --always``.

    Format: TAG[-DISTANCE-gHEX][-dirty]; with no tag, just HEX[-dirty]
    (note: no 'g' prefix).
    """
    tag = pieces["closest-tag"]
    if tag:
        out = tag
        if pieces["distance"]:
            out += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        out = pieces["short"]
    if pieces["dirty"]:
        out += "-dirty"
    return out
def render_git_describe_long(pieces):
    """Render like ``git describe --tags --dirty --always --long``.

    Format: TAG-DISTANCE-gHEX[-dirty]; the distance/hash part is always
    present when a tag exists. With no tag, just HEX[-dirty]
    (note: no 'g' prefix).
    """
    tag = pieces["closest-tag"]
    if tag:
        out = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        out = pieces["short"]
    if pieces["dirty"]:
        out += "-dirty"
    return out
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
    """Get version information or return default if unable to do so.

    Tries, in order: expanded git-archive keywords, 'git describe' on a
    checked-out tree, and finally the parent-directory name; each strategy
    signals "not applicable" by raising NotThisMethod.
    """
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose
    # Strategy 1: keywords substituted by 'git archive'.
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for i in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree",
                "date": None}
    # Strategy 2: ask git directly in the source tree.
    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass
    # Strategy 3: infer the version from the unpacked directory name.
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version", "date": None}
| [
"avirshup@gmail.com"
] | avirshup@gmail.com |
4706ed8b0f58ee78fc814b8f34e677dd65f4bb64 | 7667f883166ef0a3a97b51bcb51ffdc515ef5902 | /Packs/HealthCheck/Scripts/HealthCheckMemory/HealthCheckMemory.py | f407dfe71af5c22c4d03fe3ea3713ec3c816eb88 | [
"MIT"
] | permissive | arcus-team/content | 82c876271e083ee404629e0b87802ce193c84196 | e9f87dcbd4820f9ab57810396c43895e1f6d39b6 | refs/heads/master | 2023-08-25T11:05:11.968284 | 2021-10-18T08:27:25 | 2021-10-18T08:27:25 | 388,493,464 | 0 | 1 | MIT | 2021-10-17T15:18:53 | 2021-07-22T14:34:55 | Python | UTF-8 | Python | false | false | 4,478 | py | import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
# Resolution text attached to every actionable item produced below.
RESOLUTION = ["Performance Tuning of Cortex XSOAR Server: https://docs.paloaltonetworks.com/cortex/cortex-xsoar/6-0/"
              "cortex-xsoar-admin/cortex-xsoar-overview/performance-tuning-of-cortex-xsoar-server"]


def analyzeData(res):
    """Scan time-ordered memory readings and build actionable items.

    `res` is a list of points shaped like {'data': [usage_percent]}.
    Severities:
      - High:   any single reading >= 90%
      - Medium: 10 consecutive readings >= 80%
      - Low:    30 consecutive readings >= 70%
    Returns the list of triggered items (High, then Medium, then Low),
    or None when nothing triggered.
    """
    consecutive_low = 0
    consecutive_med = 0
    low_hit = med_hit = high_hit = False
    for point in res:
        usage = point['data'][0]
        if not low_hit:
            if usage >= 70:
                consecutive_low += 1
                low_hit = consecutive_low >= 30
            else:
                consecutive_low = 0  # streak broken
        if not med_hit:
            if usage >= 80:
                consecutive_med += 1
                med_hit = consecutive_med >= 10
            else:
                consecutive_med = 0  # streak broken
        if not high_hit and usage >= 90:
            high_hit = True

    if not (low_hit or med_hit or high_hit):
        return None
    actions = []
    if high_hit:
        actions.append({'category': 'Memory analysis', 'severity': 'High',
                        'description': "Memory has reached 90%", "resolution": f"{RESOLUTION[0]}"})
    if med_hit:
        actions.append({'category': 'Memory analysis', 'severity': 'Medium',
                        'description': "Memory has reached 80% for 10 minutes", "resolution": f"{RESOLUTION[0]}"})
    if low_hit:
        actions.append({'category': 'Memory analysis', 'severity': 'Low',
                        'description': "Memory has reached 70% for 30 minutes", "resolution": f"{RESOLUTION[0]}"})
    return actions
# Main
incident = demisto.incidents()[0]
accountName = incident.get('account')
# Multi-tenant deployments prefix API routes with "acc_<account>/".
accountName = f"acc_{accountName}/" if accountName != "" else ""
args = demisto.args()
isWidget = argToBoolean(args.get('isWidget', True))
# Fetch 24 hours of per-minute memory.usedPercent samples (1440 points).
stats = demisto.executeCommand(
    "demisto-api-post",
    {
        "uri": f"{accountName}/statistics/widgets/query",
        "body": {
            "size": 1440,
            "dataType": "system",
            "params": {
                "timeFrame": "minutes",
                "format": "HH:mm",
            },
            "query": "memory.usedPercent",
            "dateRange": {
                "period": {
                    "byFrom": "hours",
                    "fromValue": 24
                }
            },
            "widgetType": "line"
        }
    })
res = stats[0]["Contents"]["response"]
output = []
counter = 0
higher = 0
if isWidget is True:
    buildNumber = demisto.executeCommand("DemistoVersion", {})[0]['Contents']['DemistoVersion']['buildNumber']
    # in local development instances, the build number will be "REPLACE_THIS_WITH_CI_BUILD_NUM"
    buildNumber = f'{buildNumber}' if buildNumber != "REPLACE_THIS_WITH_CI_BUILD_NUM" else "618658"
    if int(buildNumber) >= 618657:
        # Line graph:
        # Downsample: emit roughly one point per 2 samples, carrying the max
        # observed since the previous emitted point.
        for entry in res:
            higher = max(entry["data"][0], higher)
            if counter % 2 == 0:
                output.append({"name": counter, "data": [higher]})
                higher = 0
            counter += 1
        data = {
            "Type": 17,
            "ContentsFormat": "line",
            "Contents": {
                "stats": output,
                "params": {
                    "timeFrame": "minutes",
                    "format": "HH:mm",
                    "layout": "vertical"
                }
            }
        }
    else:
        # Bar graph:
        # One bar per 60 samples, labelled with a wall-clock "HH:MM" derived
        # from 24h ago, carrying the max observed since the previous bar.
        now = datetime.utcnow()
        then = now - timedelta(days=1)
        for entry in res:
            higher = max(entry["data"][0], higher)
            if counter % 60 == 0:
                then = then + timedelta(hours=1)
                name = then.strftime("%H:%M")
                output.append({"name": name, "data": [higher]})
                higher = 0
            counter += 1
        data = {
            "Type": 17,
            "ContentsFormat": "bar",
            "Contents": {
                "stats": output,
                "params": {
                    "layout": "horizontal"
                }
            }
        }
    demisto.results(data)
else:
    # Non-widget mode: turn the same samples into actionable items.
    addActions = analyzeData(res)
    results = CommandResults(
        readable_output="analyzeCPUUsage Done",
        outputs_prefix="HealthCheck.ActionableItems",
        outputs=addActions)
    return_results(results)
| [
"noreply@github.com"
] | arcus-team.noreply@github.com |
266c4c6b5b38f3d1a3306cb234daa1ca747bc70e | 2681ea1866a1aafe342daa7a80fa5a4805df05fc | /Collect & Process/collect_news.py | 826c6a81c747efea2077f9052ea9e469f0072a81 | [] | no_license | manuelcoppotelli/SentiCheNews | 19ef21ace1220285d40016dcc3827e1f7c57dc0a | 39a95ebae2d5e3c3755d1f018c2131f069dfc766 | refs/heads/master | 2021-01-20T11:09:16.932991 | 2017-06-13T16:42:27 | 2017-06-13T16:42:27 | 85,826,045 | 0 | 0 | null | 2017-06-12T13:53:28 | 2017-03-22T12:39:45 | Python | UTF-8 | Python | false | false | 1,312 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import time
import shutil
from datetime import datetime
from config import *
from Collector.Saver import Saver
from Collector.FeedParser import FeedParser
# RSS feed URLs keyed by a 4-letter code for each (Italian) news outlet.
SOURCES = {
    'ILGI' : 'http://www.ilgiornale.it/feed.xml',
    'ANSA' : 'http://www.ansa.it/sito/notizie/topnews/topnews_rss.xml',
    'CORR' : 'http://xml.corriereobjects.it/rss/cronache.xml',
    'REPU' : 'http://www.repubblica.it/rss/cronaca/rss2.0.xml'
}
counter = 0  # collection rounds completed so far
COLLECT_NEWS_CALLS = 12  # total rounds: 12 x 30-minute pause = 6 hours
BREAK = 1800  # pause between rounds, in seconds (30 minutes)
saver = Saver('feed')  # output sink for scraped items (see Collector.Saver)
def collect_news():
    """Fetch every configured RSS feed once and persist each entry.

    Each saved line is tab-separated: timestamp, whitespace-normalized
    entry text, source code.
    """
    for source, url in SOURCES.items():
        for news in FeedParser(url).items():
            try:
                # Collapse runs of whitespace so each entry fits on one line.
                news = ' '.join(news.split())
                saver.save("{}\t{}\t{}".format(datetime.now(), news, source))
            except Exception as e:
                # Best-effort: report the failing entry and keep going.
                print e, news
# ------------------------------------[main]------------------------------------
if __name__ == "__main__":
    # CALL 'collect_news' FUNCTION EVERY 30 MINUTES FOR 6 HOURS
    while counter < COLLECT_NEWS_CALLS:
        collect_news()
        counter +=1
        time.sleep(BREAK) # WAIT 30 MINUTES
    if not os.path.exists(PATH_UNPROCESSED_FOLDER):
        os.mkdir(PATH_UNPROCESSED_FOLDER)
    # Move the finished feed file into the unprocessed folder.
    # NOTE(review): reaches into Saver's private attribute _file_path —
    # consider exposing it via a public accessor on Saver.
    shutil.move(saver._file_path, PATH_UNPROCESSED_FOLDER + '/')
| [
"manuelcop@gmail.com"
] | manuelcop@gmail.com |
6098a38610281939fcec4ed54b2028d7b10e1839 | fe05e7bae169021cc48d300de98ccdc6d24cd4a2 | /self-study/regression in 20lines.py | 080f078f7c2505cdb52c13a54fc571d6eda28733 | [] | no_license | Dlyyy/self-study | 0136d41f3d307a37dd8f53a938ef04db2d195383 | 8ba4f7b38b870d89648aa3a28068180e681ea856 | refs/heads/master | 2020-03-28T15:48:11.944484 | 2019-01-25T08:16:41 | 2019-01-25T08:16:41 | 148,627,543 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,645 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 8 17:15:56 2018
@author: DLY
"""
import tensorflow as tf
## X and Y data
#x_train = [1, 2, 3]
#y_train = [1, 2, 3]
#
#W = tf.Variable(tf.random_normal([1]), name='weight')
#b = tf.Variable(tf.random_normal([1]), name='bias')
#
## Our hypothesis XW+b
#hypothesis = x_train * W + b
#
## cost/loss function
#cost = tf.reduce_mean(tf.square(hypothesis - y_train))
#
## Minimize
#optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
#train = optimizer.minimize(cost)
#
## Launch the graph in a session.
#sess = tf.Session()
## Initializes global variables in the graph.
#sess.run(tf.global_variables_initializer())
#
## Fit the line
#for step in range(2001):
# sess.run(train)
# if step % 20 == 0:
# print(step, sess.run(cost), sess.run(W), sess.run(b))
# Full code with placeholders: TensorFlow 1.x linear regression (y = W*x + b)
# trained by gradient descent on the toy dataset x = y = [1, 2, 3].
W = tf.Variable(tf.random_normal([1]), name='weight')
b = tf.Variable(tf.random_normal([1]), name='bias')
X = tf.placeholder(tf.float32, shape=[None])
Y = tf.placeholder(tf.float32, shape=[None])
# Our hypothesis XW+b
hypothesis = X * W + b
# cost/loss function: mean squared error
cost = tf.reduce_mean(tf.square(hypothesis - Y))
# Minimize
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
train = optimizer.minimize(cost)
# Launch the graph in a session.
# NOTE(review): the session is never closed; a `with tf.Session() as sess:`
# block would release resources deterministically.
sess = tf.Session()
# Initializes global variables in the graph.
sess.run(tf.global_variables_initializer())
# Fit the line; log cost/W/b every 20 steps.
for step in range(2001):
    cost_val, W_val, b_val, _ = sess.run([cost, W, b, train],
                                         feed_dict={X: [1, 2, 3], Y: [1, 2, 3]})
    if step % 20 == 0:
        print(step, cost_val, W_val, b_val)
| [
"704220442@qq.com"
] | 704220442@qq.com |
536709eea987055807176cf2f8d1f0d341101005 | eaf002db3f01c9bbe4ba9163781176919930c727 | /queue2.py | 8fe4f2923dfff333562a747cab66767e2af8d5cb | [] | no_license | 54shady/pyall | 7aef9ee8547636e659be55db90eebaf4630d0c0e | e69099edd83d48beee4e56c085c3cf868df7b724 | refs/heads/master | 2021-07-01T04:46:28.339237 | 2019-03-22T22:38:02 | 2019-03-22T22:38:02 | 135,913,043 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,072 | py | #!/usr/bin/env python
# coding=utf-8
from multiprocessing import Process, Queue
import time
import random
# Use a Queue to implement inter-process communication.
# Writer: keeps pushing messages into the queue.
def write(q):
    """Continuously push the characters of random floats into queue q."""
    while True:
        if not q.full():
            for value in str(random.random()):
                print '==> %s' % value
                q.put(value)
                time.sleep(random.random())
        else:
            time.sleep(2)  # queue full: back off before retrying
# Reader: keeps draining messages from the queue.
def read(q):
    """Continuously read values from queue q and print them."""
    while True:
        if not q.empty():
            v = q.get(True) # block=True: blocking get (q.get_nowait() is the non-blocking variant)
            print '<== %s' % v
            time.sleep(random.random())
        else:
            time.sleep(1)  # queue empty: wait before polling again
if __name__ == '__main__':
    # The parent process creates the message queue and passes it to both children.
    q = Queue(10)
    pw = Process(target=write, args=(q,))
    pr = Process(target=read, args=(q,))
    # Start the writer child process.
    pw.start()
    # Start the reader child process.
    pr.start()
    # Wait for the reader to finish. NOTE(review): read() loops forever, so
    # this blocks until the program is interrupted; pw.join() is unreachable.
    pr.join()
    # Wait for the writer to finish.
    pw.join()
| [
"M_O_Bz@163.com"
] | M_O_Bz@163.com |
2999311b3cf2e4411c8a088fee9ada56079021d9 | efcc00bc9ea988cd3c0e0a423369ca79ab528199 | /madlibs_flask/app/models.py | 47e818499c3b9d273a5fbf1a32e5151988a3eb6f | [] | no_license | femgineer/python | 8f94ad23b1771289ebe5fc8c385af5dc3409131e | d1804305c9edb1a198820f41017ee05e5d3b4d5e | refs/heads/master | 2021-08-19T00:21:11.569732 | 2017-11-24T09:54:35 | 2017-11-24T09:54:35 | 111,652,225 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 853 | py | from app import db
class User(db.Model):
    """Application user; owns many Story rows via the 'author' backref."""
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(64))
    # NOTE(review): stored as a plain string column — confirm whether
    # hashing is applied elsewhere before persisting.
    password = db.Column(db.String(64))
    nickname = db.Column(db.String(64), index=True, unique=True)
    email = db.Column(db.String(120), index=True, unique=True)
    stories = db.relationship('Story', backref='author', lazy='dynamic')
    def __init__(self, username, password):
        """Create a user from a username/password pair."""
        self.username = username
        self.password = password
    def __repr__(self):
        # NOTE(review): nickname is never set by __init__, so this may
        # render '<User None>' for freshly constructed users.
        return '<User %r>' % (self.nickname)
class Story(db.Model):
    """A user-authored story, linked to its User via user_id ('author' backref)."""
    id = db.Column(db.Integer, primary_key=True)
    body = db.Column(db.String(500))
    timestamp = db.Column(db.DateTime)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    def __repr__(self):
        return '<Story %r>' % (self.body)
"poornima@bizeebee.com"
] | poornima@bizeebee.com |
cd001607c0d12f47836682d3c40438c0cf067f0d | a7be04f67909c829b90244b4d79a2e5c3c16de22 | /FrozenLake/4x4-v3/FrozenLake-v3 q-learning.py | 4b7d171c3197e18e3a14d83bc7ea4151273efb62 | [] | no_license | icsl-machinelearning/Reinforcement-Learning | 2a2c3d967dc92feec9a7b7b5b6e73cae69403a8f | 7c6856bfeca54d368aad67d742961542880c7e01 | refs/heads/master | 2020-04-28T16:52:24.151628 | 2019-04-18T04:58:44 | 2019-04-18T04:58:44 | 175,426,559 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,436 | py | import gym
import numpy as np
import matplotlib.pyplot as plt
from gym.envs.registration import register
# Register FrozenLake with is_slippery False
register(
id= 'FrozenLake-v3',
entry_point= 'gym.envs.toy_text:FrozenLakeEnv',
kwargs={'map_name': '4x4','is_slippery': False}
)
env = gym.make('FrozenLake-v3')
# Initialize table with all zeros
Q = np.zeros([env.observation_space.n, env.action_space.n])
# Discount fator
dis = 0.9
num_episodes = 2000
# Create lists to contain total rewards and steps per episode
rList = []
for i in range(num_episodes):
e = 1. / ((i//100)+1) # decaying e-greedy
# Reset environment and get first new observation
state = env.reset()
rAll = 0
done = False
# The Q-table learning algorithm
while not done:
#action = np.argmax(Q[state,:]+np.random.randn(1, env.action_space.n) / (i+1))
if np.random.rand(1) < e:
action = env.action_space.sample()
else:
action = np.argmax(Q[state,:])
new_state, reward, done,_ = env.step(action)
Q[state, action] = reward + dis*np.max(Q[new_state,:])
rAll += reward
state = new_state
rList.append(rAll)
print("Success rate: "+ str(sum(rList)/num_episodes))
print("Final Q-Table Values")
print("LEFT DOWN RIGHT UP")
print(Q)
plt.bar(range(len(rList)), rList, color="blue")
plt.show()
| [
"gosupia@naver.com"
] | gosupia@naver.com |
02c27832b197c96762cf331b9fb9c9fc0dfa52da | 9127d4f2d109013e35801e7290d1944714b378d3 | /sahara/plugins/vanilla/v2_7_1/edp_engine.py | 6d2f6566e2c3ba72f4b2a16fca60c38da497e131 | [
"Apache-2.0"
] | permissive | butterfy76/sahara | 20c841e8d8180993678283baa49c5dbc3dfbd7c3 | 376517328fd596fb016fb9f13d704221dd6ff707 | refs/heads/master | 2021-01-18T04:48:49.616129 | 2016-04-07T14:41:48 | 2016-04-07T14:41:48 | 56,059,790 | 1 | 0 | null | 2016-04-12T12:09:08 | 2016-04-12T12:09:08 | null | UTF-8 | Python | false | false | 1,663 | py | # Copyright (c) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.plugins.vanilla import confighints_helper as ch_helper
from sahara.plugins.vanilla.hadoop2 import edp_engine
from sahara.utils import edp
class EdpOozieEngine(edp_engine.EdpOozieEngine):
    """Vanilla 2.7.1 Oozie EDP engine exposing plugin-specific config hints."""
    @staticmethod
    def get_possible_job_config(job_type):
        """Return the possible job configuration hints for ``job_type``.

        Hive/MapReduce/Pig jobs get hints derived from the bundled
        2.7.1 default XML resources; any other type falls back to the
        base engine's implementation.
        """
        if edp.compare_job_type(job_type, edp.JOB_TYPE_HIVE):
            return {'job_config': ch_helper.get_possible_hive_config_from(
                'plugins/vanilla/v2_7_1/resources/hive-default.xml')}
        if edp.compare_job_type(job_type,
                                edp.JOB_TYPE_MAPREDUCE,
                                edp.JOB_TYPE_MAPREDUCE_STREAMING):
            return {'job_config': ch_helper.get_possible_mapreduce_config_from(
                'plugins/vanilla/v2_7_1/resources/mapred-default.xml')}
        if edp.compare_job_type(job_type, edp.JOB_TYPE_PIG):
            # NOTE(review): Pig hints are read from mapred-default.xml —
            # presumably because Pig jobs run on MapReduce; confirm intended.
            return {'job_config': ch_helper.get_possible_pig_config_from(
                'plugins/vanilla/v2_7_1/resources/mapred-default.xml')}
        return edp_engine.EdpOozieEngine.get_possible_job_config(job_type)
| [
"alex.barreto@disney.com"
] | alex.barreto@disney.com |
3c37d288af3cb4a01411c4ce327d07b575d90fe5 | facb8b9155a569b09ba66aefc22564a5bf9cd319 | /wp2/eraint_scripts/api_requests/all_apiRequests/sst/era_interim_sst_1986_.py | f2bf19431e1f7238b329ff0c816694cfd0224bff | [] | no_license | moinabyssinia/modeling-global-storm-surges | 13e69faa8f45a1244a964c5de4e2a5a6c95b2128 | 6e385b2a5f0867df8ceabd155e17ba876779c1bd | refs/heads/master | 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 646 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 19 12:20:00 2020
ERA-Interim api request template
@author: Michael Tadesse
"""
import os
# Download target directory for the retrieved NetCDF file (Windows path).
os.chdir('D:\\data\\era_interim\\era_interim_netcdf')
#!/usr/bin/env python
from ecmwfapi import ECMWFDataServer
server = ECMWFDataServer()
# Request one year (1986) of 6-hourly surface analyses on a 0.75° grid;
# the target filename indicates parameter '34.128' is sea surface
# temperature — confirm against the ECMWF parameter database.
server.retrieve({
    "class": "ei",
    "dataset": "interim",
    "date": '1986-01-01/to/1986-12-31',
    "expver": "1",
    "grid": "0.75/0.75",
    "levtype": "sfc",
    "param": '34.128',
    "step": "0",
    "stream": "oper",
    "time": "00:00:00/06:00:00/12:00:00/18:00:00",
    "type": "an",
    "format": "netcdf",
    "target": 'era_interim_sst_1986_.nc',
    })
"WahlInstall@CECSGS1RCP2.net.ucf.edu"
] | WahlInstall@CECSGS1RCP2.net.ucf.edu |
1a16f94746339dcfadb1ab5b1b59fcd32df59a4a | 0374a272bc3406811abc922a802231728b7993f2 | /analytics/migrations/0002_auto_20150723_0255.py | ed7ddb0060b4865b12b1bede75537a0335b784e6 | [] | no_license | marcogx/psps | 586f45448d43a2fad60e7caee6f484381e19bc4a | b688d6a99b0eb6f5ee25d847d2b335f4ffe8e08f | refs/heads/master | 2021-01-15T12:28:13.837951 | 2015-07-25T14:58:14 | 2015-07-25T14:58:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated migration altering PageView.timestamp's default."""
    dependencies = [
        ('analytics', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='pageview',
            name='timestamp',
            # NOTE(review): the fixed datetime was presumably captured from a
            # now() call at makemigrations time; harmless in a frozen
            # migration, but confirm the model itself uses a callable default.
            field=models.DateTimeField(default=datetime.datetime(2015, 7, 23, 2, 55, 4, 904120, tzinfo=utc)),
        ),
    ]
| [
"gx239@nyu.edu"
] | gx239@nyu.edu |
c41450509f3d4073ab6b2423ee62859b4f065260 | c8851a7b9fc2e69c1f3bb2eacc8375d516c7b17b | /xitorch/_tests/test_interp.py | 5d9e30c27409bbb05836eeb6b8437f52ba7d5c86 | [
"MIT",
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | udemirezen/xitorch | c5e1188d83cd1834d5b218662dd6d5624fb13873 | cc440c8cb6ecef1dac58d7c2ec217fe94cbcf165 | refs/heads/master | 2023-08-16T14:08:35.705338 | 2021-10-20T17:35:44 | 2021-10-20T17:35:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,257 | py | import warnings
import torch
from torch.autograd import gradcheck, gradgradcheck
from xitorch.interpolate.interp1 import Interp1D
from xitorch._tests.utils import device_dtype_float_test
@device_dtype_float_test(only64=True, additional_kwargs={
    "bc_type": ["clamped", "natural", "not-a-knot", "periodic", None],
    "scramble": [False, True]
})
def test_interp1_cspline(dtype, device, bc_type, scramble):
    """Cubic-spline Interp1D: compare values against scipy CubicSpline
    references for each boundary condition, then check first- and
    second-order autograd gradients w.r.t. knots, values and queries."""
    dtype_device_kwargs = {"dtype": dtype, "device": device}
    x = torch.tensor([0.0, 0.2, 0.3, 0.5, 0.8, 1.0], **dtype_device_kwargs).requires_grad_()
    if bc_type != "periodic":
        y1 = torch.tensor([1.0, 1.5, 2.1, 1.1, 2.3, 2.5], **dtype_device_kwargs).requires_grad_()
        y2 = torch.tensor([[1.0, 1.5, 2.1, 1.1, 2.3, 2.5],
                           [0.8, 1.2, 2.2, 0.4, 3.2, 1.2]], **dtype_device_kwargs).requires_grad_()
    else:
        # periodic boundary condition requires matching endpoints (y[0] == y[-1])
        y1 = torch.tensor([1.0, 1.5, 2.1, 1.1, 2.3, 1.0], **dtype_device_kwargs).requires_grad_()
        y2 = torch.tensor([[1.0, 1.5, 2.1, 1.1, 2.3, 1.0],
                           [0.8, 1.2, 2.2, 0.4, 3.2, 0.8]], **dtype_device_kwargs).requires_grad_()
    # points are well inside to avoid extrapolation in numerical gradient calculations
    xq1 = torch.linspace(0.05, 0.95, 10, **dtype_device_kwargs)
    xq2 = torch.linspace(0.05, 0.95, 4, **dtype_device_kwargs)
    scramble = scramble and bc_type != "periodic"
    if scramble:
        idx1 = torch.randperm(len(xq1))
        idx2 = torch.randperm(len(xq2))
        xq1 = xq1[..., idx1]
        xq2 = xq2[..., idx2]
    xq1 = xq1.requires_grad_()
    xq2 = xq2.requires_grad_()
    # true results (obtained from scipy.interpolate.CubicSpline)
    # from scipy.interpolate import CubicSpline
    # print("yq11:", CubicSpline(x.detach(), y1.detach(), bc_type=bc_type)(xq1.detach()))
    # print("yq12:", CubicSpline(x.detach(), y1.detach(), bc_type=bc_type)(xq2.detach()))
    # print("yq21:", CubicSpline(x.detach(), y2[1].detach(), bc_type=bc_type)(xq1.detach()))
    # print("yq22:", CubicSpline(x.detach(), y2[1].detach(), bc_type=bc_type)(xq2.detach()))
    # get the y_trues from scipy
    if bc_type == "clamped":
        yq11_true = torch.tensor([1.01599131, 1.23547394, 1.85950467, 2.02868906, 1.37102567, 1.04108172,
                                  1.42061722, 2.04849297, 2.4435166, 2.5061722],
                                 **dtype_device_kwargs)
        yq12_true = torch.tensor([1.01599131, 2.02868906, 1.42061722, 2.5061722], **dtype_device_kwargs)
        yq21_true = torch.tensor([[1.01599131, 1.23547394, 1.85950467, 2.02868906, 1.37102567, 1.04108172,
                                   1.42061722, 2.04849297, 2.4435166, 2.5061722],
                                  [0.76740145, 0.85220436, 1.79469225, 2.01628631, 0.78122407, 0.53357346,
                                   1.80606846, 3.07316928, 2.80705394, 1.48568465]],
                                 **dtype_device_kwargs)
        yq22_true = torch.tensor([[1.01599131, 2.02868906, 1.42061722, 2.5061722],
                                  [0.76740145, 2.01628631, 1.80606846, 1.48568465]],
                                 **dtype_device_kwargs)
    elif bc_type == "not-a-knot" or bc_type is None:  # default choice
        yq11_true = torch.tensor([0.66219741, 1.06231845, 1.8959342, 2.01058952, 1.36963168, 1.02084725,
                                  1.33918614, 1.97824847, 2.56027129, 2.70749165],
                                 **dtype_device_kwargs)
        yq12_true = torch.tensor([0.66219741, 2.01058952, 1.33918614, 2.70749165], **dtype_device_kwargs)
        yq21_true = torch.tensor([[0.66219741, 1.06231845, 1.8959342, 2.01058952, 1.36963168, 1.02084725,
                                   1.33918614, 1.97824847, 2.56027129, 2.70749165],
                                  [-0.01262521, 0.47242487, 1.87087507, 1.99610601, 0.81846828, 0.39785058,
                                   1.33699082, 2.68769477, 3.43433639, 2.56128965]],
                                 **dtype_device_kwargs)
        yq22_true = torch.tensor([[0.66219741, 2.01058952, 1.33918614, 2.70749165],
                                  [-0.01262521, 1.99610601, 1.33699082, 2.56128965]],
                                 **dtype_device_kwargs)
    elif bc_type == "natural":
        yq11_true = torch.tensor([1.03045416, 1.24263582, 1.85784168, 2.03025785, 1.37277695, 1.03808008,
                                  1.41177844, 2.04167374, 2.45428693, 2.52449066],
                                 **dtype_device_kwargs)
        yq12_true = torch.tensor([1.03045416, 2.03025785, 1.41177844, 2.52449066], **dtype_device_kwargs)
        yq21_true = torch.tensor([[1.03045416, 1.24263582, 1.85784168, 2.03025785, 1.37277695, 1.03808008,
                                   1.41177844, 2.04167374, 2.45428693, 2.52449066],
                                  [0.70073217, 0.82102504, 1.79853565, 2.02728778, 0.8104202, 0.46318855,
                                   1.57916384, 2.89143794, 3.09930603, 1.98521859]],
                                 **dtype_device_kwargs)
        yq22_true = torch.tensor([[1.03045416, 2.03025785, 1.41177844, 2.52449066],
                                  [0.70073217, 2.02728778, 1.57916384, 1.98521859]],
                                 **dtype_device_kwargs)
    elif bc_type == "periodic":
        yq11_true = torch.tensor([0.88184647, 1.16754002, 1.87806756, 1.99916778, 1.3241823, 1.13211374,
                                  1.69017244, 2.25696675, 2.09041608, 1.31247223],
                                 **dtype_device_kwargs)
        yq12_true = torch.tensor([0.88184647, 1.99916778, 1.69017244, 1.31247223], **dtype_device_kwargs)
        yq21_true = torch.tensor([[0.88184647, 1.16754002, 1.87806756, 1.99916778, 1.3241823, 1.13211374,
                                   1.69017244, 2.25696675, 2.09041608, 1.31247223],
                                  [0.46559344, 0.70408188, 1.82662341, 1.99677022, 0.77170332, 0.52939286,
                                   1.76540093, 3.03216372, 2.8731096, 1.44347038]],
                                 **dtype_device_kwargs)
        yq22_true = torch.tensor([[0.88184647, 1.99916778, 1.69017244, 1.31247223],
                                  [0.46559344, 1.99677022, 1.76540093, 1.44347038]],
                                 **dtype_device_kwargs)
    if scramble:
        # apply the same permutation used for the query points to the references
        yq11_true = yq11_true[..., idx1]
        yq12_true = yq12_true[..., idx2]
        yq21_true = yq21_true[..., idx1]
        yq22_true = yq22_true[..., idx2]
    def interp(x, y, xq):
        return Interp1D(x, y, method="cspline", bc_type=bc_type)(xq)
    yq11 = interp(x, y1, xq1)
    yq12 = interp(x, y1, xq2)
    yq21 = interp(x, y2, xq1)
    yq22 = interp(x, y2, xq2)
    # import matplotlib.pyplot as plt
    # from scipy.interpolate import CubicSpline
    # xx = torch.linspace(0, 1, 1000, **dtype_device_kwargs)
    # xx2 = torch.linspace(-1, 2, 1000, **dtype_device_kwargs)
    # plt.plot(xx2, interp(x, y1, xx2).detach().numpy())
    # plt.plot(xx, CubicSpline(x.detach(), y1.detach(), bc_type=bc_type)(xx.detach()))
    # plt.plot(x.detach(), y1.detach(), 'x')
    # plt.show()
    # the periodic reference values are looser, hence the larger tolerance
    if bc_type == "periodic":
        rtol = 2e-2
    else:
        rtol = 1e-3
    assert torch.allclose(yq11, yq11_true, rtol=rtol)
    assert torch.allclose(yq12, yq12_true, rtol=rtol)
    assert torch.allclose(yq21, yq21_true, rtol=rtol)
    assert torch.allclose(yq22, yq22_true, rtol=rtol)
    # skip the gradient check if bc_type is None
    if bc_type is None:
        return
    gradcheck(interp, (x, y1, xq1))
    gradcheck(interp, (x, y1, xq2))
    gradcheck(interp, (x, y2, xq1))
    gradcheck(interp, (x, y2, xq2))
    gradgradcheck(interp, (x, y1, xq1))
    gradgradcheck(interp, (x, y1, xq2))
    gradgradcheck(interp, (x, y2, xq1))
    gradgradcheck(interp, (x, y2, xq2))
@device_dtype_float_test(only64=True, additional_kwargs={
    "scramble": [False, True]
})
def test_interp1_linear(dtype, device, scramble):
    """Linear Interp1D: compare values against scipy interp1d references,
    then check first- and second-order autograd gradients."""
    dtype_device_kwargs = {"dtype": dtype, "device": device}
    x = torch.tensor([0.0, 0.2, 0.3, 0.5, 0.8, 1.0], **dtype_device_kwargs).requires_grad_()
    y1 = torch.tensor([1.0, 1.5, 2.1, 1.1, 2.3, 2.5], **dtype_device_kwargs).requires_grad_()
    y2 = torch.tensor([[1.0, 1.5, 2.1, 1.1, 2.3, 2.5],
                       [0.8, 1.2, 2.2, 0.4, 3.2, 1.2]], **dtype_device_kwargs).requires_grad_()
    # points are well inside to avoid extrapolation in numerical gradient calculations
    xq1 = torch.linspace(0.05, 0.95, 10, **dtype_device_kwargs)
    xq2 = torch.linspace(0.05, 0.95, 4, **dtype_device_kwargs)
    if scramble:
        idx1 = torch.randperm(len(xq1))
        idx2 = torch.randperm(len(xq2))
        xq1 = xq1[..., idx1]
        xq2 = xq2[..., idx2]
    xq1 = xq1.requires_grad_()
    xq2 = xq2.requires_grad_()
    # # true results (obtained from scipy.interpolate.interp1d)
    # from scipy.interpolate import interp1d
    # print("yq11:", interp1d(x.detach(), y1.detach())(xq1.detach()))
    # print("yq12:", interp1d(x.detach(), y1.detach())(xq2.detach()))
    # print("yq21:", interp1d(x.detach(), y2[1].detach())(xq1.detach()))
    # print("yq22:", interp1d(x.detach(), y2[1].detach())(xq2.detach()))
    yq11_true = torch.tensor([1.125, 1.375, 1.8, 1.85, 1.35, 1.3, 1.7, 2.1, 2.35, 2.45],
                             **dtype_device_kwargs)
    yq12_true = torch.tensor([1.125, 1.85, 1.7, 2.45], **dtype_device_kwargs)
    yq21_true = torch.tensor([[1.125, 1.375, 1.8, 1.85, 1.35, 1.3, 1.7, 2.1, 2.35, 2.45],
                              [0.9, 1.1, 1.7, 1.75, 0.85, 0.86666667, 1.8, 2.73333333, 2.7, 1.7]],
                             **dtype_device_kwargs)
    yq22_true = torch.tensor([[1.125, 1.85, 1.7, 2.45],
                              [0.9, 1.75, 1.8, 1.7]],
                             **dtype_device_kwargs)
    if scramble:
        # apply the same permutation used for the query points to the references
        yq11_true = yq11_true[..., idx1]
        yq12_true = yq12_true[..., idx2]
        yq21_true = yq21_true[..., idx1]
        yq22_true = yq22_true[..., idx2]
    def interp(x, y, xq):
        return Interp1D(x, y, method="linear")(xq)
    yq11 = interp(x, y1, xq1)
    yq12 = interp(x, y1, xq2)
    yq21 = interp(x, y2, xq1)
    yq22 = interp(x, y2, xq2)
    # import matplotlib.pyplot as plt
    # from scipy.interpolate import interp1d
    # xx = torch.linspace(0, 1, 1000, **dtype_device_kwargs)
    # xx2 = torch.linspace(-1, 2, 1000, **dtype_device_kwargs)
    # plt.plot(xx2, interp(x, y1, xx2).detach().numpy())
    # plt.plot(xx, interp1d(x.detach(), y1.detach())(xx.detach()))
    # plt.plot(x.detach(), y1.detach(), 'x')
    # plt.show()
    assert torch.allclose(yq11, yq11_true)
    assert torch.allclose(yq12, yq12_true)
    assert torch.allclose(yq21, yq21_true)
    assert torch.allclose(yq22, yq22_true)
    gradcheck(interp, (x, y1, xq1))
    gradcheck(interp, (x, y1, xq2))
    gradcheck(interp, (x, y2, xq1))
    gradcheck(interp, (x, y2, xq2))
    gradgradcheck(interp, (x, y1, xq1))
    gradgradcheck(interp, (x, y1, xq2))
    gradgradcheck(interp, (x, y2, xq1))
    gradgradcheck(interp, (x, y2, xq2))
@device_dtype_float_test(only64=True)
def test_interp1_unsorted(dtype, device):
    """Interp1D must return the same values whether the abscissa is sorted
    or scrambled, and whether y is bound at construction or at call time."""
    tensor_kwargs = {"dtype": dtype, "device": device}
    x = torch.tensor([0.0, 0.2, 0.3, 0.5, 0.8, 1.0], **tensor_kwargs).requires_grad_()
    y1 = torch.tensor([1.0, 1.5, 2.1, 1.1, 2.3, 2.5], **tensor_kwargs).requires_grad_()
    y2 = torch.tensor([[1.0, 1.5, 2.1, 1.1, 2.3, 2.5],
                       [0.8, 1.2, 2.2, 0.4, 3.2, 1.2]], **tensor_kwargs).requires_grad_()
    # query points are well inside the domain to avoid extrapolation
    xq1 = torch.linspace(0.05, 0.95, 10, **tensor_kwargs)
    xq2 = torch.linspace(0.05, 0.95, 4, **tensor_kwargs)

    def interp(x, y, xq):
        # y supplied to the constructor
        return Interp1D(x, y, method="linear")(xq)

    def interp2(x, y, xq):
        # y supplied at call time
        return Interp1D(x, method="linear")(xq, y)

    # reference results computed with the sorted abscissa
    cases = [(y1, xq1), (y1, xq2), (y2, xq1), (y2, xq2)]
    expected = [interp(x, y, xq) for (y, xq) in cases]

    # scramble x together with the matching y entries
    perm = torch.randperm(len(x))
    x = x[..., perm]
    y1 = y1[..., perm]
    y2 = y2[..., perm]
    cases = [(y1, xq1), (y1, xq2), (y2, xq1), (y2, xq2)]

    # recompute with the unsorted abscissa, both calling conventions
    results_ctor = [interp(x, y, xq) for (y, xq) in cases]
    results_call = [interp2(x, y, xq) for (y, xq) in cases]
    for ref, got in zip(expected, results_ctor):
        assert torch.allclose(ref, got)
    for ref, got in zip(expected, results_call):
        assert torch.allclose(ref, got)
@device_dtype_float_test(only64=True, additional_kwargs={
    "method": ["cspline", "linear"]
})
def test_interp1_editable_module(dtype, device, method):
    """Check Interp1D's EditableModule parameter declarations: assertparams
    must not emit warnings (escalated to errors) for either call form."""
    dtype_device_kwargs = {"dtype": dtype, "device": device}
    x = torch.tensor([0.0, 0.2, 0.3, 0.5, 0.8, 1.0], **dtype_device_kwargs).requires_grad_()
    y = torch.tensor([[1.0, 1.5, 2.1, 1.1, 2.3, 2.5],
                      [0.8, 1.2, 2.2, 0.4, 3.2, 1.2]], **dtype_device_kwargs).requires_grad_()
    xq = torch.linspace(0, 1, 10, **dtype_device_kwargs).requires_grad_()
    cls1 = Interp1D(x, y, method=method)
    cls2 = Interp1D(x, method=method)
    with warnings.catch_warnings():
        # turn any warning raised by assertparams into a test failure
        warnings.simplefilter("error")
        cls1.assertparams(cls1.__call__, xq)
        cls2.assertparams(cls2.__call__, xq, y)
@device_dtype_float_test(only64=True)
def test_extrap(dtype, device):
    """Exercise every extrapolation mode of Interp1D ("nan", "mirror",
    "bound", a constant, and a callable) on queries outside [0, 1]."""
    dtype_device_kwargs = {"dtype": dtype, "device": device}
    x = torch.tensor([0.0, 0.2, 0.3, 0.5, 0.8, 1.0], **dtype_device_kwargs).requires_grad_()
    y1 = torch.tensor([[1.0, 2.1, 1.5, 1.1, 2.3, 2.5],
                       [0.0, 1.2, 2.2, 0.4, 3.2, 1.2]], **dtype_device_kwargs).requires_grad_()
    # first four queries are inside the domain, the rest require extrapolation
    xq1 = torch.tensor([0.0, 1. / 3, 2. / 3, 3. / 3, -1. / 3, -1.0, -4. / 3, 4. / 3,
                        6. / 3, 7. / 3, 9. / 3], **dtype_device_kwargs).requires_grad_()
    # true results (obtained from scipy.interpolate.CubicSpline)
    nan = float("nan")
    yq_nan_true = torch.tensor([
        [1., 1.3127193, 1.7445744, 2.5, nan, nan, nan, nan, nan, nan, nan],
        [0., 2.13368966, 1.82654566, 1.2, nan, nan, nan, nan, nan, nan, nan],
    ], **dtype_device_kwargs)
    yq_mir_true = torch.tensor([
        [1., 1.3127193, 1.7445744, 2.5, 1.3127193, 2.5, 1.7445744, 1.7445744, 1., 1.3127193, 2.5],
        [0., 2.13368966, 1.82654566, 1.2, 2.13368966, 1.2, 1.82654566, 1.82654566, 0., 2.13368966, 1.2],
    ], **dtype_device_kwargs)
    yq_bnd_true = torch.tensor([
        [1., 1.3127193, 1.7445744, 2.5, 1., 1., 1., 2.5, 2.5, 2.5, 2.5],
        [0., 2.13368966, 1.82654566, 1.2, 0., 0., 0., 1.2, 1.2, 1.2, 1.2],
    ], **dtype_device_kwargs)
    yq_1_true = torch.tensor([
        [1., 1.3127193, 1.7445744, 2.5, 1., 1., 1., 1., 1., 1., 1.],
        [0., 2.13368966, 1.82654566, 1.2, 1., 1., 1., 1., 1., 1., 1.],
    ], **dtype_device_kwargs)
    cal = lambda x: x * 2.
    yq_cal_true = torch.tensor([
        [1., 1.3127193, 1.7445744, 2.5, -2. / 3, -2., -8. / 3, 8. / 3, 12. / 3, 14. / 3, 18. / 3],
        [0., 2.13368966, 1.82654566, 1.2, -2. / 3, -2., -8. / 3, 8. / 3, 12. / 3, 14. / 3, 18. / 3],
    ], **dtype_device_kwargs)
    extraps = ["nan", "mirror", "bound", 1.0, cal]
    yq_trues = [yq_nan_true, yq_mir_true, yq_bnd_true, yq_1_true, yq_cal_true]
    def interp(x, y, xq, extrap):
        return Interp1D(x, y, extrap=extrap, method="cspline", bc_type="natural")(xq)
    for extrap, yq_true in zip(extraps, yq_trues):
        print("Extrap: %s" % extrap)
        yq = interp(x, y1, xq1, extrap=extrap)
        # equal_nan=True so the "nan" extrapolation mode compares equal
        assert torch.allclose(yq, yq_true, equal_nan=True)
| [
"firman.kasim@gmail.com"
] | firman.kasim@gmail.com |
077386b64aacbf83fd91c7547837c9d93a04cfd2 | 812124722699dd70ad0a1ecf7b458785d91c9dc9 | /1two_sum.py | 4a4eb1949c38380abd4f8fbd4396a2c8b15cb2ed | [] | no_license | rensiqi7/leetcodepython | cfed8d06e10842074428a866908c36fc68962311 | de1aa52a60a0f3ff645ba43a938ee20d9c3bdc4d | refs/heads/master | 2020-03-18T16:12:41.014486 | 2018-12-12T07:10:20 | 2018-12-12T07:10:20 | 134,953,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,308 | py | class Solution:
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
hash_map = {}
for index, value in enumerate(nums):
hash_map[value] = index
for index1, value in enumerate(nums):
if target - value in hash_map:
index2 = hash_map[target - value]
if index1 != index2:
return [index1, index2]
"""
look_for = {}
for n,x in enumerate(nums):
try:
return look_for[x], n
except KeyError:
look_for.setdefault(target - x,n)
"""
'''
Here you build the dictionary of values on an as-needed basis. The dictionary
is keyed by the values you are seeking, and for each value you track the index
of its first appearance. As soon as you come to a value that satisfies the
problem, you're done. There is only one for loop. The only other detail is to
add 1 to each index to satisfy the ridiculous requirement that the indices be
1-based. Like that's going to teach you about Python programming. Keys are
added to the dictionary using the setdefault function, since if the key is
already present you want to keep its value (the lowest index).
'''
| [
"rensiqi384906149@yahoo.co.jp"
] | rensiqi384906149@yahoo.co.jp |
283432dc3d24805d579de21867db80ce87ddeaec | a1c76477a4792bcb00fbd31d3a38f51a95146f6c | /exemploFlowbox.py | fd141f04543eedb9245415dc2fd9c901a2e585cc | [] | no_license | MarioBlancosoto/repasoPython | bbde0b2a0d0f6b9aaff3927cd71047601a7c5e3f | 243c16b3623df4af7567099427565486169f8033 | refs/heads/master | 2021-09-07T10:27:05.876831 | 2018-02-21T17:27:06 | 2018-02-21T17:27:06 | 106,548,256 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,005 | py | import gi
from moduloGrid import GridModificado
gi.require_version("Gtk","3.0")
from gi.repository import Gtk,Gio
class ventanaPrincipal(Gtk.Window):
    """Demo window: a HeaderBar with navigation buttons plus a scrollable
    Gtk.FlowBox filled with 20 GridModificado children."""
    def __init__(self):
        Gtk.Window.__init__(self, title="Exemplo Gtk.Flowbox")
        self.set_default_size(500,350)
        self.set_border_width(5)
        # Header bar with title, subtitle and a native close button
        cabeceira = Gtk.HeaderBar(title="Exemplo FlowBox")
        cabeceira.set_subtitle("Exemplo de headerBar")
        cabeceira.props.show_close_button = True
        # Right side of the header: a button with a themed "send/receive" icon
        btnBoton = Gtk.Button()
        icono = Gio.ThemedIcon(name ="mail-send-receive-symbolic")
        imaxe = Gtk.Image.new_from_gicon(icono,Gtk.IconSize.BUTTON)
        btnBoton.add(imaxe)
        cabeceira.pack_end(btnBoton)
        # Left side of the header: two linked arrow buttons (back/forward)
        caixa = Gtk.Box(orientation = Gtk.Orientation.HORIZONTAL)
        Gtk.StyleContext.add_class(caixa.get_style_context(),"linked")
        btnFrechaI = Gtk.Button()
        btnFrechaI.add(Gtk.Arrow(Gtk.ArrowType.LEFT,Gtk.ShadowType.NONE))
        caixa.add(btnFrechaI)
        btnFrechaD = Gtk.Button()
        btnFrechaD.add(Gtk.Arrow(Gtk.ArrowType.RIGHT, Gtk.ShadowType.NONE))
        caixa.add(btnFrechaD)
        cabeceira.pack_start(caixa)
        # FlowBox configuration
        flowBox = Gtk.FlowBox()
        flowBox.set_valign(Gtk.Align.START)
        flowBox.set_max_children_per_line(30)
        flowBox.set_selection_mode(Gtk.SelectionMode.NONE)
        self.crea_flowbox(flowBox)
        scroll = Gtk.ScrolledWindow()
        # set_policy(horizontal policy, vertical policy)
        scroll.set_policy(Gtk.PolicyType.NEVER,Gtk.PolicyType.AUTOMATIC)
        # put the flowbox inside the scrolled window
        scroll.add(flowBox)
        # and the scrolled window inside the window
        self.add(scroll)
        self.set_titlebar(cabeceira)
        self.connect("delete-event",Gtk.main_quit)
        self.show_all()
    def crea_flowbox(self,flowbox):
        # Populate the flowbox with 20 custom grid widgets
        for i in range (20):
            flowbox.add(GridModificado())
if __name__ == "__main__":
ventanaPrincipal()
Gtk.main() | [
"986882601"
] | 986882601 |
f5d0ec3698a7ce7c0cc50833f105f5253acc81ca | 2ac7aa5aeb2849c8f6a79c9b13ff74d87c78beaa | /testfolder/accounts/models.py | 5e01c4bd4a3c0d2f7c738b802c4fdb2ddbb1d9cf | [] | no_license | steadily-worked/Django | 292ec4880a260ae817299a1f3911b9dccbd4c6e6 | e8d451a0d17ba38965700a3cfabb49df9858f6a0 | refs/heads/master | 2022-11-19T04:32:18.037364 | 2020-07-21T08:44:44 | 2020-07-21T08:44:44 | 276,822,767 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | from django.db import models
from django.contrib.auth.models import User
class Signup(models.Model):
user = models.OneToOneField(User, on_delete = models.CASCADE)
major = models.CharField(max_length=200)
phone = models.CharField(max_length=30) | [
"qpwpep5429@gmail.com"
] | qpwpep5429@gmail.com |
77a28391a5b77537334f95a4b0f383c41b98bd5d | bd958f3dedd58e55f0025b93e63b1683a6d7acce | /Week_7/w7-weekend/postscrape/postscrape/spiders/post_spider.py | 585d096392cbb76f7ce454be16523dcbc21cf50a | [] | no_license | Vanderscycle/lighthouse-data-notes | 2d6faf468910e0ee4d8fd2eb0b9520c6998737fd | 000907c61a38b207d36cbb2c70959fb531f81af9 | refs/heads/master | 2023-03-01T16:47:14.835183 | 2021-02-10T17:25:51 | 2021-02-10T17:25:51 | 295,590,618 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,448 | py | import scrapy
# part one of the scrappy adventure
# class PostsSpider(scrapy.Spider):
# name = 'posts'
# start_urls = [
# 'https://blog.scrapinghub.com/page/1',
# 'https://blog.scrapinghub.com/page/2'
# ]
# def parse(self,response):
# page = response.url.split('/')[-1]
# filename = 'post-%s.html' % page
# with open(filename,'wb') as f:
# f.write(response.body)
"""In [45]: for post in response.css("div.post-item"):
...: title = post.css('.post-header h2 a::text')[0].get()
...: date = post.css('.post-header a::text')[1].get()
...: author = post.css('.post-header a::text')[2].get()
...: print(dict(title=title,date=date,authro=author))"""
class PostsSpider(scrapy.Spider):
    """Crawl the Scrapinghub blog: one item per post, following pagination."""

    name = 'posts'
    start_urls = [
        'https://blog.scrapinghub.com'] #works only for the frontpage

    def parse(self,response):
        """Yield title/date/author for each post, then queue the next page."""
        for entry in response.css('div.post-item'):
            header_links = entry.css('.post-header a::text')
            yield {
                'title' : entry.css('.post-header h2 a::text')[0].get(),
                'date' : header_links[1].get(),
                'author' : header_links[2].get()
            }
        # Follow the "next posts" link, if the page has one
        next_href = response.css('a.next-posts-link::attr(href)').get()
        if next_href is not None:
            yield scrapy.Request(response.urljoin(next_href), callback=self.parse)
| [
"hvandersleyen@gmail.com"
] | hvandersleyen@gmail.com |
2f0979b4ad168181fd6e7b3df1f5c5751b73cfa1 | d3c0f19552f6a56e20a3782ddfdfd8bfba7203a4 | /venv/lib/python3.5/site-packages/cvxpy/reductions/solvers/solver.py | 0ba9f44ce323cf182cdbbdd8ae5230bf048598cc | [] | no_license | johnjaniczek/SFCLS | 8697924778ef235b96449a53990c5370b00606e2 | 35c3dfc12488106faed0075e790d47ce3a4aaa43 | refs/heads/master | 2020-05-18T10:43:39.106296 | 2019-07-31T18:29:44 | 2019-07-31T18:29:44 | 184,351,669 | 13 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,422 | py | """
Copyright 2017 Robin Verschueren
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import abc
from collections import defaultdict
from cvxpy.reductions.reduction import Reduction
def group_constraints(constraints):
    """Group constraints by their concrete class.

    Parameters
    ----------
    constraints : list of constraints

    Returns
    -------
    collections.defaultdict
        Maps each constraint type to the list of exactly those constraints
        that are of that type, preserving input order; missing types yield
        an empty list.
    """
    grouped = defaultdict(list)
    for constraint in constraints:
        grouped[type(constraint)].append(constraint)
    return grouped
class Solver(Reduction, metaclass=abc.ABCMeta):
    """Generic interface for a solver that uses reduction semantics.

    `solve` is a template method: it applies the reduction to the problem,
    delegates the numerical work to the subclass's `solve_via_data`, and
    inverts the raw result back into a Solution for the original problem.
    """
    # Solver capabilities.
    MIP_CAPABLE = False  # overridden by solvers that handle mixed-integer programs
    # Keys for inverse data.
    VAR_ID = 'var_id'
    EQ_CONSTR = 'eq_constr'
    NEQ_CONSTR = 'other_constr'
    @abc.abstractmethod
    def name(self):
        """The name of the solver.
        """
        return NotImplemented
    @abc.abstractmethod
    def import_solver(self):
        """Imports the solver; raises ImportError when unavailable.
        """
        return NotImplemented
    def is_installed(self):
        """Is the solver installed?

        True when `import_solver` succeeds, False on ImportError.
        """
        try:
            self.import_solver()
            return True
        except ImportError:
            return False
    @abc.abstractmethod
    def solve_via_data(self, data, warm_start, verbose, solver_opts, solver_cache=None):
        """Solve a problem represented by data returned from apply.
        """
        return NotImplemented
    def solve(self, problem, warm_start, verbose, solver_opts):
        """Solve the problem and return a Solution object.
        """
        # apply() lowers the problem to solver data plus the inverse-mapping
        # info; invert() maps the raw solver output back to the original space.
        data, inv_data = self.apply(problem)
        solution = self.solve_via_data(data, warm_start, verbose, solver_opts)
        return self.invert(solution, inv_data)
| [
"jjjanicz@asu.edu"
] | jjjanicz@asu.edu |
e261ead41f0892c0829916d5fe64243e6d5e89c4 | 3b275735d81b847e59aab5733371d4694962b702 | /python/cusignal/demod/demod.py | 860ac5ac8d67d5a265c9900cb6eba015cb5da9e0 | [
"LicenseRef-scancode-other-permissive"
] | permissive | rapidsai/cusignal | 65c6f0c3781de58acee51196491e11069026cbb3 | f21f593e0892ea02f2e591a2595e9b65faf4753c | refs/heads/branch-23.10 | 2023-09-03T01:39:12.591418 | 2023-08-28T13:31:31 | 2023-08-28T13:31:31 | 203,811,620 | 719 | 132 | NOASSERTION | 2023-08-28T13:31:33 | 2019-08-22T14:27:27 | Python | UTF-8 | Python | false | false | 1,661 | py | # Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import cupy as cp
def fm_demod(x, axis=-1):
    """
    Demodulate Frequency Modulated Signal

    Parameters
    ----------
    x : ndarray
        Received complex valued signal or batch of signals
    axis : int, optional
        Axis along which demodulation is performed (default: last axis).

    Returns
    -------
    y : ndarray
        The demodulated output. The finite difference shortens `axis` by
        one sample relative to `x`; all other axes keep their shape.
    """
    x = cp.asarray(x)
    if cp.isrealobj(x):
        raise AssertionError("Input signal must be complex-valued")
    # Instantaneous phase, unwrapped to remove the 2*pi discontinuities.
    x_angle = cp.unwrap(cp.angle(x), axis=axis)
    # The discrete derivative of the phase is the instantaneous frequency.
    y = cp.diff(x_angle, axis=axis)
    return y
| [
"noreply@github.com"
] | rapidsai.noreply@github.com |
0a6122350523487279b6ce789c570507f65e70de | 5fe8d63f78f8dec8c65097cd1d2fe585a22236ed | /lcall_bug/__init__.py | 9cd7cd57c8d4ebbd0792ed8e237b299a980d6e7a | [] | no_license | dpb1/snapcraft-lcall-bug | e8de01bf4628a1354147999e7b3b240b83b8fdcb | 653d67e2294a9e1dc0481f8defc7d516ed4fa72c | refs/heads/master | 2020-03-16T20:30:52.508578 | 2018-05-10T23:13:17 | 2018-05-10T23:13:17 | 132,961,396 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 74 | py | def main():
print("""💩""")
if __name__ == '__main__':
main()
| [
"davidpbritton@gmail.com"
] | davidpbritton@gmail.com |
4620b4562951e1aa59973d2f9d3b050ecb0b3830 | 88bb8a4efd97ee20ce25c9eeefccb520cd375022 | /newsmeme/forms/__init__.py | e13ea5b9476dacea1794d2fc3d737f1311abff90 | [
"BSD-3-Clause"
] | permissive | yangjiandong/newsmeme | f3851f0ec52e94fc6cb96b0c7f9585dfbe3f4800 | 2f2eb1c7d8e112c6af675795bc165d50c158ab34 | refs/heads/master | 2020-05-27T08:43:12.023107 | 2012-11-07T08:56:07 | 2012-11-07T08:56:07 | 5,538,259 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | # -*- coding: utf-8 -*-
"""
forms.py
~~~~~~~~
    Aggregates the package's form classes (account, OpenID, post,
    contact and comment forms) for convenient package-level imports.
:copyright: (c) 2010 by Dan Jacob.
:license: BSD, see LICENSE for more details.
"""
from .account import LoginForm, SignupForm, EditAccountForm, \
RecoverPasswordForm, ChangePasswordForm, DeleteAccountForm
from .openid import OpenIdLoginForm, OpenIdSignupForm
from .post import PostForm
from .contact import ContactForm, MessageForm
from .comment import CommentForm, CommentAbuseForm
| [
"young.jiandong@gmail.com"
] | young.jiandong@gmail.com |
c08ba764550ea6f6d34cc9ec92dd1229eb113269 | 93ab050518092de3a433b03744d09b0b49b541a6 | /Intermediario/tratamento de erros/assertions.py | 79b1b90a0894388d0e62a1df9a33ed5e2c5d83b8 | [
"MIT"
] | permissive | ggsant/pyladies | 1e5df8772fe772f8f7d0d254070383b9b9f09ec6 | 37e11e0c9dc2fa2263ed5b42df5a395169408766 | refs/heads/master | 2023-01-02T11:49:44.836957 | 2020-11-01T18:36:43 | 2020-11-01T18:36:43 | 306,947,105 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 244 | py |
#** Assertion é um tipo de checagem que pode ser feita para garantir que o seu codigo nao esta fazendo algo obviamente errado
ages = [26,57,92,54,22,15,17,80,47,73]
ages.sort()
# Bug fix: the original line `ages[15,17,22,...]` indexed the list with a
# tuple, which raises TypeError at runtime. The intent was to assert the
# sorted contents of the list:
assert ages == [15,17,22,26,47,54,57,73,80,92]
assert ages[0] <= ages[-1] | [
"61892998+ggsant@users.noreply.github.com"
] | 61892998+ggsant@users.noreply.github.com |
341f5c0a42620d59ee3bc428931b0681b3e7633a | cbf3551bab38a341d4b9693ffafdd382214b338e | /basics/code/文件/excel-rw.py | 42024b07b269e7a1f1bea3f3a199211852aa402c | [] | no_license | Duome/python_gold | bcd558ee1ba69a3c15755d0362669150e5eaab07 | 9f783184ffa3162bcff63e3c3c0e1cd1e2490620 | refs/heads/master | 2020-12-30T11:15:53.944171 | 2018-10-27T12:56:42 | 2018-10-27T12:56:59 | 91,547,253 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 980 | py | # -*- coding: utf-8 -*-
__author__ = 'Duome'
""" 读取excel内容,存入内存中,内存结构为字典
"""
from openpyxl import *
from openpyxl.compat import range
# Read the data: cache every sheet of the source workbook in `cont`,
# a dict mapping sheet name -> list of rows (each row a list of cell values).
cont = {}
work_book = load_workbook(filename='anjuke.xlsx')
# NOTE(review): get_sheet_names/get_sheet_by_name/remove_sheet are deprecated
# in current openpyxl (use wb.sheetnames, wb[name], wb.remove) — check the
# pinned openpyxl version before upgrading.
sheetname = work_book.get_sheet_names()
for i in sheetname:
    cont[i] = []
    sheet = work_book.get_sheet_by_name(i)
    row_nums = len(sheet['A'])  # used row count = length of column A
    col_nums = len(sheet['1'])  # used column count = length of row 1
    for row in range(row_nums):
        cont[i].append([])
        for col in range(col_nums):
            # openpyxl cells are 1-indexed, hence the +1 offsets
            cont[i][row].append(sheet.cell(row=row+1, column=col+1).value)
# Create a new workbook and write the cached data back, one sheet per source sheet.
new_book = Workbook()
worksheet = new_book.active
new_book.remove_sheet(worksheet)  # drop the default empty sheet
for sheet_name in sheetname:
    worksheets = new_book.create_sheet(title='%s' % sheet_name)
    sheet_cont = cont[sheet_name]
    for row in sheet_cont:
        worksheets.append(row)
new_book.save('new_book.xlsx')
| [
"duomesiki@163.com"
] | duomesiki@163.com |
f3b2b24732f99d02ca4bbb20dc47ce18ce23fcc4 | 68046bcbf9c8fd562f3445457f2da486f3227b76 | /project/serializers.py | eae5ab37536283dcaa75d86ad37974a60b246c85 | [] | no_license | sherifsakr/evaluation | 5ad733d3de4747edb66e2dfeb511529a386d75ad | fcdb05729553cacb98cc7e99f5da5da5f71d9c37 | refs/heads/master | 2020-03-12T13:40:04.784476 | 2018-04-30T05:48:44 | 2018-04-30T05:48:44 | 130,546,030 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 611 | py | # from rest_framework import serializers
# from .models import Employee, Evaluation
#
#
#
# class EmployeeSerializer(serializers.ModelSerializer):
#
# class Meta:
# model = Employee
# #fields = '__all__' # all model fields will be included
# fields = ('empname',)
#
#
# class EvaluationSerializer(serializers.ModelSerializer):
# employeeid = EmployeeSerializer(many=False, read_only=True)
#
# class Meta:
# model = Evaluation
# fields = '__all__' # all model fields will be included
# #fields = ('id', 'name', 'employee')
| [
"sherif@B2-F1-002"
] | sherif@B2-F1-002 |
e0921f2763a0971b0f38b7568b4c85a1eac01336 | 76e28cee20fb6bd50247ef2bbb14e8ac57c304cc | /appdata/admin.py | 74dd84f4cecec74bde6e068e0c02764ee878dcab | [] | no_license | gurpreet1205/Blood-Bank-Management | 87a700400e025b03434d224dc7ed5174eae0f65b | 176903433437e747562029913b3bee7145dbe90c | refs/heads/master | 2021-04-26T22:21:19.795275 | 2018-03-13T19:27:24 | 2018-03-13T19:27:24 | 124,078,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 712 | py | from django.contrib import admin
from appdata.models import City
from appdata.models import State
from appdata.models import BloodGroupType
from appdata.models import Facts
from appdata.models import NotDonate
from appdata.models import Medication
from appdata.models import FamousQuotes
from appdata.models import Details
from appdata.models import UrgentBlood
from appdata.models import Availability
# Register every appdata model with the default admin site so records can
# be created, edited and deleted through the Django admin interface.
admin.site.register(City)
admin.site.register(State)
admin.site.register(BloodGroupType)
admin.site.register(Facts)
admin.site.register(NotDonate)
admin.site.register(Medication)
admin.site.register(FamousQuotes)
admin.site.register(Details)
admin.site.register(UrgentBlood)
admin.site.register(Availability)
| [
"noreply@github.com"
] | gurpreet1205.noreply@github.com |
a77fb5676beb1cd37ccad48108112c82ef4967bd | 437dde1cdcdb4e2dfa66181ec02f3b7bce9376ac | /examples/examples_v1.x.x/machine_learning/mlxtend_example.py | 543f4ea75df1752cc0a6203f368ad4f678d4a534 | [
"MIT"
] | permissive | chrinide/Hyperactive | dd994703c233cbfb67a21e0b4c8e939528da11f2 | 3a068d7808b1b4ed714560403d227f48e588f0a7 | refs/heads/master | 2022-11-09T03:21:06.257160 | 2020-06-24T07:37:53 | 2020-06-24T07:37:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,054 | py | from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import cross_val_score
from mlxtend.classifier import EnsembleVoteClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from hyperactive import Hyperactive
data = load_breast_cancer()
X, y = data.data, data.target
def model(para, X, y):
    """Objective for Hyperactive: mean 3-fold CV score of a soft-voting ensemble.

    `para` supplies the hyperparameters under test: "n_estimators" and
    "max_depth" for the gradient-boosting member, "hidden_layer_sizes"
    for the MLP member; the SVC member is fixed.
    """
    members = [
        GradientBoostingClassifier(
            n_estimators=para["n_estimators"], max_depth=para["max_depth"]
        ),
        MLPClassifier(hidden_layer_sizes=para["hidden_layer_sizes"]),
        SVC(gamma="auto", probability=True),
    ]
    ensemble = EnsembleVoteClassifier(clfs=members, weights=[2, 1, 1], voting="soft")
    return cross_val_score(ensemble, X, y, cv=3).mean()
# Hyperactive search space: maps the objective function to the value
# ranges sampled for each hyperparameter it reads from `para`.
search_config = {
    model: {
        "n_estimators": range(10, 100, 10),
        "max_depth": range(2, 12),
        "hidden_layer_sizes": (range(10, 100, 10),),
    }
}
opt = Hyperactive(search_config, n_iter=30)  # run 30 optimization iterations
opt.search(X, y)
| [
"simonblanke@hotmail.de"
] | simonblanke@hotmail.de |
e78ef16bc86fa77716669089fd2f815920ba0c89 | 077314de25c8152ae08bc37b1c73b3b934bab19c | /server.py | 1acea687a2c8092df813f1e16ee432eba7d42e6f | [] | no_license | Djapec/Flask-mini | 3928a17b7e7078f7e4536be9c88bcb624f434876 | e6212fcd9b2ed99d008bad79b6e43ddea9d959c1 | refs/heads/master | 2022-10-08T15:38:51.991755 | 2019-10-31T23:10:22 | 2019-10-31T23:10:22 | 214,277,415 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 484 | py | import os
from flask_app import createApp
from flask_app import db
# NOTE(review): this hard-codes Development and overrides anything set in the
# shell — presumably intentional for local runs; confirm before deploying.
os.environ['FLASK_ENV_TYPE'] = 'Development'
# Select the config class matching the (just-forced) environment variable.
if os.environ['FLASK_ENV_TYPE'] == 'Development':
    from config.development import Development as Config
elif os.environ['FLASK_ENV_TYPE'] == 'Production':
    from config.production import Production as Config
else:
    raise Exception('Not proper FLASK_ENV_TYPE set.')
# Build the Flask app through the application factory with that config.
app = createApp(Config)
@app.route('/')
def hello():
# db.create_all()
return 'Heeey' | [
"pedjadjape97@hotmail.rs"
] | pedjadjape97@hotmail.rs |
829a0a8f6cf279dc5abab8e70e5355cac96e8e56 | 2f47343c8c587e90d759e2c6bb236f91c1303c62 | /lab4_lab6_kolke/lab4_lab6_kolke/urls.py | c92662d174c1122a6993f2f91ae60e9b099d81e5 | [] | no_license | KolRobOsk/aplikacje-internetowe-21720-185ic | d2db5da740d38230e4b78941e20fecea1b547e5e | 696aeed88b550e990456ac03c3e3eabc5a0f6263 | refs/heads/main | 2023-03-24T17:38:40.991574 | 2021-03-30T07:15:58 | 2021-03-30T07:15:58 | 315,331,603 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,033 | py | """lab4_lab6_kolke URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
# drf-yasg schema view: serves the OpenAPI description of the API, publicly
# accessible (no authentication required).
schema_view = get_schema_view(
    openapi.Info(
        title="Lab 4 i Lab 6 21720",
        default_version='v1',
        description="Lab 4 Lab 6 21720",
        terms_of_service="https://www.google.com/policies/terms/",
        contact=openapi.Contact(email="contact@snippets.local"),
        license=openapi.License(name="BSD License"),
    ),
    public=True,
    permission_classes=(permissions.AllowAny,),
)
urlpatterns = [
    path('admin/', admin.site.urls),
    path('api/v1/', include('lab4_lab6.urls')),
    path('api-auth/', include('rest_framework.urls')),
    path('api/v1/rest-auth/', include('rest_auth.urls')),
    path('api/v1/rest-auth/registration/', include('rest_auth.registration.urls')),
    # swagger urls
    # The online docs show url() instead of re_path(), but url() is deprecated
    # as of Django 3.1:
    # https://docs.djangoproject.com/en/3.1/ref/urls/#django.urls.re_path
    re_path(r'^swagger(?P<format>\.json|\.yaml)$', schema_view.without_ui(cache_timeout=0), name='schema-json'),
    re_path(r'^swagger/$', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
    re_path(r'^redoc/$', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
]
| [
"robertoskarkolke@gmail.com"
] | robertoskarkolke@gmail.com |
1d35638f02a1be255171831a9c64bc46d2a13242 | 42528f5dcd3e2d4adbbb0e370a8298ff62e6c679 | /Structual/Composite/Group.py | 6b6e1f1001602cc4ef14580b0c6361d9fed60879 | [] | no_license | mohamedelashhab/design-pattern | 5b36d54ed7a141220c86ddff92dea6622f8c3b7e | da5b7d06b4f93a427a7499d98a9e970b61ce6b97 | refs/heads/master | 2022-11-27T16:10:24.825512 | 2020-08-08T15:13:23 | 2020-08-08T15:13:23 | 276,177,882 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 263 | py | from .Component import Component
class Group(Component):
    """Composite node of the Composite pattern: a Component that holds and
    renders a collection of child components (shapes or nested groups)."""

    def __init__(self):
        # Children, rendered in insertion order.
        self.__objects = []

    def render(self):
        """Render every child in insertion order."""
        # Renamed loop variable: the original shadowed the builtin `object`.
        for child in self.__objects:
            child.render()

    def add(self, shape):
        """Add a child component (a leaf shape or a nested Group)."""
        self.__objects.append(shape)
| [
"elashhab_fcih@yahoo.com"
] | elashhab_fcih@yahoo.com |
11d3d447590953692cde67858c3ac4fb46a7d376 | bb33e6be8316f35decbb2b81badf2b6dcf7df515 | /source/res/scripts/common/serializable_types/customizations/__init__.py | 98de618134f24f9cb63a0ae1a8dc3cb30950fb03 | [] | no_license | StranikS-Scan/WorldOfTanks-Decompiled | 999c9567de38c32c760ab72c21c00ea7bc20990c | d2fe9c195825ececc728e87a02983908b7ea9199 | refs/heads/1.18 | 2023-08-25T17:39:27.718097 | 2022-09-22T06:49:44 | 2022-09-22T06:49:44 | 148,696,315 | 103 | 39 | null | 2022-09-14T17:50:03 | 2018-09-13T20:49:11 | Python | UTF-8 | Python | false | false | 1,423 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/common/serializable_types/customizations/__init__.py
from serialization import parseCompDescr
from serialization.serializable_component import SerializableComponentChildType
from .attachment import AttachmentComponent
from .camouflage import CamouflageComponent
from .customization_outfit import CustomizationOutfit, getAllItemsFromOutfit
from .decal import DecalComponent
from .insignia import InsigniaComponent
from .paint import PaintComponent
from .personal_number import PersonalNumberComponent
from .projection_decal import ProjectionDecalComponent
from .sequence import SequenceComponent
__all__ = ('AttachmentComponent', 'CamouflageComponent', 'CustomizationOutfit', 'getAllItemsFromOutfit', 'DecalComponent', 'InsigniaComponent', 'PaintComponent', 'PersonalNumberComponent', 'ProjectionDecalComponent', 'SequenceComponent', 'CUSTOMIZATION_CLASSES', 'parseC11sComponentDescr')
# All concrete customization component classes known to the serializer.
CUSTOMIZATION_CLASS_LIST = [AttachmentComponent,
 CamouflageComponent,
 CustomizationOutfit,
 DecalComponent,
 InsigniaComponent,
 PaintComponent,
 PersonalNumberComponent,
 ProjectionDecalComponent,
 SequenceComponent]
# Dispatch table: each class's customType id -> the class, used to decode
# packed component descriptors.
CUSTOMIZATION_CLASSES = {subClass.customType:subClass for subClass in CUSTOMIZATION_CLASS_LIST}
def parseC11sComponentDescr(customizationElementCompDescr):
    # Decode a packed customization component descriptor into a component
    # instance, dispatching on the embedded customType id.
    return parseCompDescr(CUSTOMIZATION_CLASSES, customizationElementCompDescr)
| [
"StranikS_Scan@mail.ru"
] | StranikS_Scan@mail.ru |
654061c6473c217f0b8970013775efe874756de0 | 41d1f7b5bfd451b55d6a01756a426692f32141ca | /Problem008.py | 634d4dd9849c1ae9739fd3e4987c5fce025888b7 | [] | no_license | thefowlj/project-euler-python | b60452177230a0b476d3098e18e31d5f845f4629 | fd7de87f5233aecb78dfc89239beb646d48d86b2 | refs/heads/master | 2022-03-25T13:48:46.789946 | 2020-01-06T12:03:02 | 2020-01-06T12:03:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,841 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Project Euler Problem 8
# The four adjacent digits in the 1000-digit number that have the greatest product
# are 9 × 9 × 8 × 9 = 5832.
#
# 73167176531330624919225119674426574742355349194934
# 96983520312774506326239578318016984801869478851843
# 85861560789112949495459501737958331952853208805511
# 12540698747158523863050715693290963295227443043557
# 66896648950445244523161731856403098711121722383113
# 62229893423380308135336276614282806444486645238749
# 30358907296290491560440772390713810515859307960866
# 70172427121883998797908792274921901699720888093776
# 65727333001053367881220235421809751254540594752243
# 52584907711670556013604839586446706324415722155397
# 53697817977846174064955149290862569321978468622482
# 83972241375657056057490261407972968652414535100474
# 82166370484403199890008895243450658541227588666881
# 16427171479924442928230863465674813919123162824586
# 17866458359124566529476545682848912883142607690042
# 24219022671055626321111109370544217506941658960408
# 07198403850962455444362981230987879927244284909188
# 84580156166097919133875499200524063689912560717606
# 05886116467109405077541002256983155200055935729725
# 71636269561882670428252483600823257530420752963450
#
# Find the thirteen adjacent digits in the 1000-digit number that have the
# greatest product. What is the value of this product?
from functools import reduce
from operator import mul

# The 1000-digit number, stored as twenty 50-digit string chunks.
a = "73167176531330624919225119674426574742355349194934"
b = "96983520312774506326239578318016984801869478851843"
c = "85861560789112949495459501737958331952853208805511"
d = "12540698747158523863050715693290963295227443043557"
e = "66896648950445244523161731856403098711121722383113"
f = "62229893423380308135336276614282806444486645238749"
g = "30358907296290491560440772390713810515859307960866"
h = "70172427121883998797908792274921901699720888093776"
i = "65727333001053367881220235421809751254540594752243"
j = "52584907711670556013604839586446706324415722155397"
k = "53697817977846174064955149290862569321978468622482"
l = "83972241375657056057490261407972968652414535100474"
m = "82166370484403199890008895243450658541227588666881"
n = "16427171479924442928230863465674813919123162824586"
o = "17866458359124566529476545682848912883142607690042"
p = "24219022671055626321111109370544217506941658960408"
q = "07198403850962455444362981230987879927244284909188"
r = "84580156166097919133875499200524063689912560717606"
s = "05886116467109405077541002256983155200055935729725"
t = "71636269561882670428252483600823257530420752963450"
# Concatenate the chunks into the full 1000-digit string.
numString = a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p+q+r+s+t


def max_adjacent_product(digits, window):
    """Return the greatest product of `window` adjacent digits in `digits`.

    digits: string of decimal digits.
    window: number of adjacent digits to multiply (13 for Problem 8).
    """
    # Slide a window over the string; reduce multiplies its digits together.
    return max(
        reduce(mul, (int(ch) for ch in digits[start:start + window]))
        for start in range(len(digits) - window + 1)
    )


# Answer to Project Euler Problem 8 (13 adjacent digits).
print(max_adjacent_product(numString, 13))
| [
"noreply@github.com"
] | thefowlj.noreply@github.com |
fefed23fb80f9dc72cc874f3823d84599f25c499 | 529363570ad12b671dcefae22bd705bfcced02b0 | /docs/conf.py | 328788faa41fca726cace6d9095e415e942caf04 | [
"MIT"
] | permissive | adboy316/porfolio_website | 90c3ba29745397026ca3207b00e8f38becbfb151 | 100e5338ab314397deadea9cb5c15b46eb721d89 | refs/heads/master | 2020-07-05T15:47:50.758979 | 2019-08-16T08:37:30 | 2019-08-16T08:37:30 | 202,690,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,954 | py | # Portfolio Website documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "Portfolio Website"
copyright = """2019, Ariel Delgado"""
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "0.1"
# The full version, including alpha/beta/rc tags.
release = "0.1"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "default"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "portfoliodoc"
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
(
"index",
"portfolio.tex",
"Portfolio Website Documentation",
"""Ariel Delgado""",
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
"index",
"portfolio",
"Portfolio Website Documentation",
["""Ariel Delgado"""],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"portfolio",
"Portfolio Website Documentation",
"""Ariel Delgado""",
"Portfolio Website",
"""A portfolio website""",
"Miscellaneous",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| [
"adboy316@yahoo.com"
] | adboy316@yahoo.com |
2b1cc39833028fe78b892f338542a7c098ba5de1 | 9f6ea5c3b8418cf5c3ba46f669e6d481df2c56ff | /core/models.py | 379ec048b300466c7b08f41d108f4510d7230d01 | [] | no_license | nazmulhasanDEV/Multi-vendorDjangoApp | afb5d24268f78b805f56e37c4acb2859a993ec2d | 94016ef0b9c97ef5c275b49673f096c89a173a6b | refs/heads/main | 2023-03-20T08:58:38.594308 | 2021-03-05T19:33:03 | 2021-03-05T19:33:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,513 | py | from users.models import CustomUser as User
from django.db.models import (Model, TextField, DateTimeField, ForeignKey,
CASCADE, FileField)
from django.core.files.storage import FileSystemStorage
from asgiref.sync import async_to_sync
from channels.layers import get_channel_layer
class MessageModel(Model):
    """
    A single chat message between two users: an owner (user), a recipient,
    a timestamp, an optional text body and an optional file attachment.
    """
    user = ForeignKey(User, on_delete=CASCADE, verbose_name='user',
                      related_name='from_user', db_index=True)
    recipient = ForeignKey(User, on_delete=CASCADE, verbose_name='recipient',
                           related_name='to_user', db_index=True)
    timestamp = DateTimeField('timestamp', auto_now_add=True, editable=False,
                              db_index=True)
    # body and attachment are both nullable: a message may be text-only,
    # attachment-only, or both.
    body = TextField('body', blank=True,null=True)
    attachment = FileField(blank=True,null=True, max_length=500)
    attachmentName = TextField(blank=True,null=True)

    def __str__(self):
        return str(self.id)

    def characters(self):
        """
        Toy function to count body characters.
        :return: body's char number (0 when the body is empty/None)
        """
        # Guard: body is nullable, so len(None) would raise TypeError.
        return len(self.body) if self.body else 0

    def notify_ws_clients(self):
        """
        Inform both the sender's and the recipient's websocket groups that a
        new message exists (the payload carries only the message id).
        """
        notification = {
            # NOTE: misspelled event name is kept on purpose — the channels
            # consumers dispatch on this exact string.
            'type': 'recieve_group_message',
            'message': '{}'.format(self.id)
        }
        channel_layer = get_channel_layer()
        # Debug output. Fixed the second label: it previously printed the
        # recipient id under the caption "user.id".
        print("user.id {}".format(self.user.id))
        print("recipient.id {}".format(self.recipient.id))

        # Each user has a group named after their id; notify both ends.
        async_to_sync(channel_layer.group_send)("{}".format(self.user.id), notification)
        async_to_sync(channel_layer.group_send)("{}".format(self.recipient.id), notification)

    def save(self, *args, **kwargs):
        """
        Trims white spaces, saves the message and notifies both parties via WS
        if the message is new.
        """
        is_new = self.id is None
        if self.body:
            # Guard: body is nullable (attachment-only messages); calling
            # .strip() on None would raise AttributeError.
            self.body = self.body.strip()
        super(MessageModel, self).save(*args, **kwargs)
        if is_new:
            self.notify_ws_clients()

    # Meta
    class Meta:
        app_label = 'core'
        verbose_name = 'message'
        verbose_name_plural = 'messages'
        ordering = ('-timestamp',)
class File(Model):
    """A standalone uploaded file, stored under MEDIA_ROOT/attachment/."""
    attachment = FileField(blank=True,null=True,
                           upload_to='attachment')

    def __str__(self):
        # attachment is nullable: FieldFile.name can be empty/None when no
        # file is attached, and __str__ must return a str — fall back to ''.
        return self.attachment.name or ''
| [
"kajoengasa@gmail.com"
] | kajoengasa@gmail.com |
0a7cfb626b7484cc5ed58e4ad6faabb5d9d062f4 | 388631f8d20dc496d4ada5278db4785e5dd58471 | /Binary Search Iterative.py | 9c68113cc7364ec4285507e28fc2ec0b4d4f0cbd | [] | no_license | aviralb/pythondatastructure | 621be5c3e22f7136df501c9aca32582387752d74 | 09281f94c6869e3a23b45c9012faa79a80e4b0c8 | refs/heads/master | 2020-06-25T10:36:40.354279 | 2019-07-28T13:20:57 | 2019-07-28T13:20:57 | 199,285,037 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | def binarysearch(A,key):
    # Classic iterative binary search.
    # NOTE(review): assumes A is sorted ascending — the precondition of
    # binary search; not validated here.
    low=0
    high=len(A)-1
    while low<=high:
        # Midpoint; // keeps the index an integer.
        mid=(low+high)//2
        if key==A[mid]:
            return True
        elif key<A[mid]:
            # Key, if present, lies in the left half.
            high=mid-1
        else:
            # Key, if present, lies in the right half.
            low=mid+1
    # Search space exhausted: key is not in A.
    return False
# Demo: search a sorted list for 5 and report the result.
A=[1,2,3,4,5,6,7,8,9]
found=binarysearch(A,5)
print("number found in : ", found)
| [
"noreply@github.com"
] | aviralb.noreply@github.com |
43c9c2f7df58504589611e48640b75181d0ef3fe | 496a4b331fd857834f30d1623d369ee985253f1d | /end/demo3/venv/Scripts/pip-script.py | 0a5f50144be45a0bd181eb569fe5bac1273fc3ca | [] | no_license | jwf-2333/biubiubiu | 44cb12dae8b12a207ef37ca455e867412f5acdea | 4f8e5fff36b11cfec8f1a0549644177b44295a67 | refs/heads/master | 2021-01-02T02:18:16.782003 | 2020-03-01T12:40:42 | 2020-03-01T12:40:42 | 239,450,788 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | #!D:\py1911\gitdemo\end\demo3\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
)
| [
"875016526@qq.com"
] | 875016526@qq.com |
3d02dd71a31241de31b514374aae7587e4864bec | cc18d49f4788ef6a16a33f249fa63606233fe367 | /ch08/tryexcept/value_error.py | 633f730237ae9fedbeb1e622672270010bebe499 | [] | no_license | github-solar/pyworks-eduwill | 75d4b66aec59604bda7ffd1170544b5025256342 | 81be4047e5cb5361bdb02fa2761e5b24660fadc4 | refs/heads/master | 2023-06-16T14:10:12.777764 | 2021-07-07T01:27:24 | 2021-07-07T01:27:24 | 378,804,008 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | #에러처리:컴퓨터 뻑될 때 빠져나오도록
# (Translated) You could also write `except ValueError as e: print(e)`,
# but here a friendly explanatory message is printed instead.
# Keep prompting until the user enters a valid integer.
while True:
    try:
        x = int(input("숫자를 입력하세요 : "))
        print(x)
        # Valid integer entered — leave the retry loop.
        break
    except ValueError as e:
        #print(e)
        print('숫자형식이 아닙니다. 숫자를 입력하세요. ')
"solarsun@nate.com"
] | solarsun@nate.com |
71d53581b6ded3f766c703c7f469ed9d46170c93 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-eg/huaweicloudsdkeg/v1/model/show_detail_of_event_source_response.py | ff28271917177aec0db1e6e2d2477c54a1caec43 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 15,438 | py | # coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowDetailOfEventSourceResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'id': 'str',
'name': 'str',
'label': 'str',
'description': 'str',
'provider_type': 'str',
'event_types': 'list[CustomizeSourceInfoEventTypes]',
'created_time': 'str',
'updated_time': 'str',
'channel_id': 'str',
'channel_name': 'str',
'type': 'str',
'detail': 'object',
'status': 'str',
'error_info': 'ErrorInfo'
}
attribute_map = {
'id': 'id',
'name': 'name',
'label': 'label',
'description': 'description',
'provider_type': 'provider_type',
'event_types': 'event_types',
'created_time': 'created_time',
'updated_time': 'updated_time',
'channel_id': 'channel_id',
'channel_name': 'channel_name',
'type': 'type',
'detail': 'detail',
'status': 'status',
'error_info': 'error_info'
}
def __init__(self, id=None, name=None, label=None, description=None, provider_type=None, event_types=None, created_time=None, updated_time=None, channel_id=None, channel_name=None, type=None, detail=None, status=None, error_info=None):
"""ShowDetailOfEventSourceResponse
The model defined in huaweicloud sdk
:param id: 事件源ID
:type id: str
:param name: 事件源名称
:type name: str
:param label: 事件源名称展示
:type label: str
:param description: 事件源描述
:type description: str
:param provider_type: 事件源提供方类型,OFFICIAL:官方云服务事件源;CUSTOM:用户创建的自定义事件源;PARTNER:伙伴事件源
:type provider_type: str
:param event_types: 事件源提供的事件类型列表,只有官方云服务事件源提供事件类型
:type event_types: list[:class:`huaweicloudsdkeg.v1.CustomizeSourceInfoEventTypes`]
:param created_time: 创建UTC时间
:type created_time: str
:param updated_time: 更新UTC时间
:type updated_time: str
:param channel_id: 事件源归属的事件通道ID
:type channel_id: str
:param channel_name: 事件源归属的事件通道名称
:type channel_name: str
:param type: 事件源类型
:type type: str
:param detail: json格式封装消息实例链接信息:如RabbitMQ实例的instance_id字段、虚拟主机vhost字段、队列queue字段、用户名、密码等
:type detail: object
:param status: 自定义事件源状态
:type status: str
:param error_info:
:type error_info: :class:`huaweicloudsdkeg.v1.ErrorInfo`
"""
super(ShowDetailOfEventSourceResponse, self).__init__()
self._id = None
self._name = None
self._label = None
self._description = None
self._provider_type = None
self._event_types = None
self._created_time = None
self._updated_time = None
self._channel_id = None
self._channel_name = None
self._type = None
self._detail = None
self._status = None
self._error_info = None
self.discriminator = None
if id is not None:
self.id = id
if name is not None:
self.name = name
if label is not None:
self.label = label
if description is not None:
self.description = description
if provider_type is not None:
self.provider_type = provider_type
if event_types is not None:
self.event_types = event_types
if created_time is not None:
self.created_time = created_time
if updated_time is not None:
self.updated_time = updated_time
if channel_id is not None:
self.channel_id = channel_id
if channel_name is not None:
self.channel_name = channel_name
if type is not None:
self.type = type
if detail is not None:
self.detail = detail
if status is not None:
self.status = status
if error_info is not None:
self.error_info = error_info
@property
def id(self):
"""Gets the id of this ShowDetailOfEventSourceResponse.
事件源ID
:return: The id of this ShowDetailOfEventSourceResponse.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ShowDetailOfEventSourceResponse.
事件源ID
:param id: The id of this ShowDetailOfEventSourceResponse.
:type id: str
"""
self._id = id
@property
def name(self):
"""Gets the name of this ShowDetailOfEventSourceResponse.
事件源名称
:return: The name of this ShowDetailOfEventSourceResponse.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ShowDetailOfEventSourceResponse.
事件源名称
:param name: The name of this ShowDetailOfEventSourceResponse.
:type name: str
"""
self._name = name
@property
def label(self):
"""Gets the label of this ShowDetailOfEventSourceResponse.
事件源名称展示
:return: The label of this ShowDetailOfEventSourceResponse.
:rtype: str
"""
return self._label
@label.setter
def label(self, label):
"""Sets the label of this ShowDetailOfEventSourceResponse.
事件源名称展示
:param label: The label of this ShowDetailOfEventSourceResponse.
:type label: str
"""
self._label = label
@property
def description(self):
"""Gets the description of this ShowDetailOfEventSourceResponse.
事件源描述
:return: The description of this ShowDetailOfEventSourceResponse.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this ShowDetailOfEventSourceResponse.
事件源描述
:param description: The description of this ShowDetailOfEventSourceResponse.
:type description: str
"""
self._description = description
@property
def provider_type(self):
"""Gets the provider_type of this ShowDetailOfEventSourceResponse.
事件源提供方类型,OFFICIAL:官方云服务事件源;CUSTOM:用户创建的自定义事件源;PARTNER:伙伴事件源
:return: The provider_type of this ShowDetailOfEventSourceResponse.
:rtype: str
"""
return self._provider_type
@provider_type.setter
def provider_type(self, provider_type):
"""Sets the provider_type of this ShowDetailOfEventSourceResponse.
事件源提供方类型,OFFICIAL:官方云服务事件源;CUSTOM:用户创建的自定义事件源;PARTNER:伙伴事件源
:param provider_type: The provider_type of this ShowDetailOfEventSourceResponse.
:type provider_type: str
"""
self._provider_type = provider_type
@property
def event_types(self):
"""Gets the event_types of this ShowDetailOfEventSourceResponse.
事件源提供的事件类型列表,只有官方云服务事件源提供事件类型
:return: The event_types of this ShowDetailOfEventSourceResponse.
:rtype: list[:class:`huaweicloudsdkeg.v1.CustomizeSourceInfoEventTypes`]
"""
return self._event_types
@event_types.setter
def event_types(self, event_types):
"""Sets the event_types of this ShowDetailOfEventSourceResponse.
事件源提供的事件类型列表,只有官方云服务事件源提供事件类型
:param event_types: The event_types of this ShowDetailOfEventSourceResponse.
:type event_types: list[:class:`huaweicloudsdkeg.v1.CustomizeSourceInfoEventTypes`]
"""
self._event_types = event_types
@property
def created_time(self):
"""Gets the created_time of this ShowDetailOfEventSourceResponse.
创建UTC时间
:return: The created_time of this ShowDetailOfEventSourceResponse.
:rtype: str
"""
return self._created_time
@created_time.setter
def created_time(self, created_time):
"""Sets the created_time of this ShowDetailOfEventSourceResponse.
创建UTC时间
:param created_time: The created_time of this ShowDetailOfEventSourceResponse.
:type created_time: str
"""
self._created_time = created_time
@property
def updated_time(self):
"""Gets the updated_time of this ShowDetailOfEventSourceResponse.
更新UTC时间
:return: The updated_time of this ShowDetailOfEventSourceResponse.
:rtype: str
"""
return self._updated_time
@updated_time.setter
def updated_time(self, updated_time):
"""Sets the updated_time of this ShowDetailOfEventSourceResponse.
更新UTC时间
:param updated_time: The updated_time of this ShowDetailOfEventSourceResponse.
:type updated_time: str
"""
self._updated_time = updated_time
@property
def channel_id(self):
"""Gets the channel_id of this ShowDetailOfEventSourceResponse.
事件源归属的事件通道ID
:return: The channel_id of this ShowDetailOfEventSourceResponse.
:rtype: str
"""
return self._channel_id
@channel_id.setter
def channel_id(self, channel_id):
"""Sets the channel_id of this ShowDetailOfEventSourceResponse.
事件源归属的事件通道ID
:param channel_id: The channel_id of this ShowDetailOfEventSourceResponse.
:type channel_id: str
"""
self._channel_id = channel_id
@property
def channel_name(self):
"""Gets the channel_name of this ShowDetailOfEventSourceResponse.
事件源归属的事件通道名称
:return: The channel_name of this ShowDetailOfEventSourceResponse.
:rtype: str
"""
return self._channel_name
@channel_name.setter
def channel_name(self, channel_name):
"""Sets the channel_name of this ShowDetailOfEventSourceResponse.
事件源归属的事件通道名称
:param channel_name: The channel_name of this ShowDetailOfEventSourceResponse.
:type channel_name: str
"""
self._channel_name = channel_name
@property
def type(self):
"""Gets the type of this ShowDetailOfEventSourceResponse.
事件源类型
:return: The type of this ShowDetailOfEventSourceResponse.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this ShowDetailOfEventSourceResponse.
事件源类型
:param type: The type of this ShowDetailOfEventSourceResponse.
:type type: str
"""
self._type = type
    @property
    def detail(self):
        """Gets the detail of this ShowDetailOfEventSourceResponse.

        Message-instance connection information wrapped in JSON format:
        e.g. for a RabbitMQ instance, the instance_id field, virtual host
        (vhost) field, queue field, username, password, and so on.

        :return: The detail of this ShowDetailOfEventSourceResponse.
        :rtype: object
        """
        return self._detail

    @detail.setter
    def detail(self, detail):
        """Sets the detail of this ShowDetailOfEventSourceResponse.

        Message-instance connection information wrapped in JSON format:
        e.g. for a RabbitMQ instance, the instance_id field, virtual host
        (vhost) field, queue field, username, password, and so on.

        :param detail: The detail of this ShowDetailOfEventSourceResponse.
        :type detail: object
        """
        self._detail = detail
    @property
    def status(self):
        """Gets the status of this ShowDetailOfEventSourceResponse.

        Status of the custom event source.

        :return: The status of this ShowDetailOfEventSourceResponse.
        :rtype: str
        """
        return self._status

    @status.setter
    def status(self, status):
        """Sets the status of this ShowDetailOfEventSourceResponse.

        Status of the custom event source.

        :param status: The status of this ShowDetailOfEventSourceResponse.
        :type status: str
        """
        self._status = status
    @property
    def error_info(self):
        """Gets the error_info of this ShowDetailOfEventSourceResponse.

        Error information for this event source (exact semantics are not
        documented upstream -- presumably the most recent error reported).

        :return: The error_info of this ShowDetailOfEventSourceResponse.
        :rtype: :class:`huaweicloudsdkeg.v1.ErrorInfo`
        """
        return self._error_info

    @error_info.setter
    def error_info(self, error_info):
        """Sets the error_info of this ShowDetailOfEventSourceResponse.

        :param error_info: The error_info of this ShowDetailOfEventSourceResponse.
        :type error_info: :class:`huaweicloudsdkeg.v1.ErrorInfo`
        """
        self._error_info = error_info
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
    def to_str(self):
        """Returns the string representation of the model (JSON text)."""
        import simplejson as json
        if six.PY2:
            import sys
            # Python 2 only: force the default encoding to UTF-8 so that
            # non-ASCII field values can be serialized without errors.
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`; delegates to the JSON form produced by to_str()."""
        return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowDetailOfEventSourceResponse):
return False
return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        # Explicit __ne__ is required under Python 2, which does not
        # derive inequality from __eq__ automatically.
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
9b34beb6dedbd0f69cfefcef6a7af3b0b7339573 | 7416056e689dfc94391c4b108652cea02d59a31a | /reservation/urls.py | 47ec5528736d0b82dcaad6cf9c2c9f282c7817b9 | [] | no_license | zshanabek/house-booking-app | 0ea29fb8113671eb164ead8d335a986b850898a1 | cca5225f40b8a055a2db78810258325f2ba7ded1 | refs/heads/master | 2022-11-28T00:20:12.789534 | 2020-08-14T09:16:40 | 2020-08-14T09:16:40 | 225,791,244 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | from django.urls import path, include
# Reservation URL configuration: exposes the host-facing "requests" and
# guest-facing "reservations" endpoints through a DRF DefaultRouter.
from rest_framework_nested import routers
from house.views import HouseViewSet
from reservation.views import ReservationHostViewSet, ReservationGuestViewSet
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
# NOTE(review): HouseViewSet and the nested `routers` import appear unused in
# this module -- presumably leftovers; confirm before removing.
router.register(r'requests', ReservationHostViewSet)
router.register(r'reservations', ReservationGuestViewSet)
# The API URLs are now determined automatically by the router.
urlpatterns = [
    path('', include(router.urls)),
]
| [
"zshanabek@gmail.com"
] | zshanabek@gmail.com |
f408e1aa3019ed42b377e4c386f10d04f3d241b7 | d2f16ccb2ebbc91112225d90c4c466a5526e1502 | /airflow_ml_dags/test/test_predict.py | caad3e694b773058577d5e753f5a844d1f8c0f3c | [] | no_license | made-ml-in-prod-2021/panda1987ds | 551cb0e18e7a22578e843b0db13a18da19fb13c7 | 458a6df03d8c0233657ee872b68c339fb5f4a9c4 | refs/heads/main | 2023-06-04T09:22:35.444804 | 2021-06-23T20:07:25 | 2021-06-23T20:07:25 | 353,713,028 | 0 | 0 | null | 2021-06-23T20:07:26 | 2021-04-01T13:40:47 | HTML | UTF-8 | Python | false | false | 165 | py | def test_dag_loaded(dag_bag):
dag = dag_bag.dags.get('predict')
assert dag_bag.import_errors == {}
assert dag is not None
assert len(dag.tasks) == 2
| [
"pankratova.dasha@gmail.com"
] | pankratova.dasha@gmail.com |
65c0ea0324a0fad1e0015533893d4a1424de408c | 61a879713b4697d2a2ac1d7c709fa674cae9c4f8 | /Integrations/AwakeSecurity/AwakeSecurity.py | ae76b15fbd1bb9111cb753027e343e89464aeb66 | [
"MIT"
] | permissive | cloudshark/content | 589731a815caf0f25983b22e0c5eef758321a1aa | 005e5e913d145bec217752300d1c029c2581856d | refs/heads/master | 2020-09-04T18:26:27.001042 | 2019-11-05T19:39:59 | 2019-11-05T19:39:59 | 219,848,878 | 2 | 0 | MIT | 2019-11-05T20:56:29 | 2019-11-05T20:56:28 | null | UTF-8 | Python | false | false | 14,740 | py | import demistomock as demisto
from CommonServerPython import *
''' IMPORTS '''
import base64
import re
import requests
# disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' GLOBALS '''
handle_proxy()
params = demisto.params()
server = params["server"]
prefix = server + "/awakeapi/v1"
# `unsecure` param disables TLS certificate verification.
verify = not params.get('unsecure', False)
credentials = params["credentials"]
identifier = credentials["identifier"]
password = credentials["password"]
# Notability-percentile cutoffs used by toDBotScore() below.
suspicious_threshold = params["suspicious_threshold"]
malicious_threshold = params["malicious_threshold"]
authTokenRequest = {
    "loginUsername": identifier,
    "loginPassword": password
}
# NOTE: this POST runs at import time, once per command invocation; if
# authentication fails, every command (including test-module) fails here.
authTokenResponse = requests.post(prefix + "/authtoken", json=authTokenRequest, verify=verify)
authToken = authTokenResponse.json()["token"]["value"]
headers = {
    "Authentication": ("access " + authToken)
}
command = demisto.command()
args = demisto.args()
# Shared mutable request body that slurp()/lookup()/query() fill in.
request = {}
''' HELPERS '''
# Convenient utility to marshal command arguments into the request body
def slurp(fields):
    """Copy each named demisto argument, when present, into the shared
    module-level `request` body."""
    for name in fields:
        if name in args:
            request[name] = args[name]
# Render a subset of the fields of the Contents as a markdown table
def displayTable(contents, fields):
    """Render the given fields of a list of result dicts as markdown.

    Columns that are absent or falsy in every row are omitted entirely;
    returns the string "Empty results" when nothing remains.
    """
    # We keep a list (not a set) so column order follows `fields`, which
    # is ordered to put the most relevant information first.
    visible = []
    for row in contents:
        for field in fields:
            if field in row and row[field] and field not in visible:
                visible.append(field)
    if not visible:
        return "Empty results"
    header = "| "
    divider = "| "
    for field in visible:
        # Translate camel-case field names to title-case space-separated words
        words = re.findall("[a-zA-Z][A-Z]*[^A-Z]*", field)
        title = " ".join(word.title() for word in words)
        header += title + " | "
        divider += "--- | "
    header += "\n"
    divider += "\n"
    body = ""
    for row in contents:
        body += "| "
        for field in visible:
            cell = json.dumps(row[field]) if field in row else ""
            body += cell + " | "
        body += "\n"
    return header + divider + body
def returnResults(contents, outerKey, innerKey, humanReadable, dbotScore):
    """Post a war-room entry carrying machine-readable contents (under the
    AwakeSecurity context key) alongside a human-readable markdown view.

    `dbotScore` may be None, in which case no DBotScore is attached.
    """
    machineReadable = {
        "AwakeSecurity": contents,
    }
    contextKey = "AwakeSecurity." + outerKey + "(val." + innerKey + "===obj." + innerKey + ")"
    entryContext = {
        contextKey: contents,
    }
    if dbotScore is not None:
        # Surface the score both in the raw contents and the entry context.
        machineReadable["DBotScore"] = dbotScore
        entryContext["DBotScore"] = dbotScore
    demisto.results({
        "Type": entryTypes['note'],
        "ContentsFormat": formats['json'],
        "Contents": json.dumps(machineReadable),
        "HumanReadable": humanReadable,
        "ReadableContentsFormat": formats['markdown'],
        "EntryContext": entryContext,
    })
def toDBotScore(indicator_type, percentile, lookup_key):
    """Translate an Awake notability percentile into a DBotScore dict.

    percentile <= suspicious_threshold -> 1 (good);
    percentile <= malicious_threshold  -> 2 (suspicious);
    otherwise                          -> 3 (bad).
    """
    if percentile <= suspicious_threshold:
        score = 1
    elif percentile <= malicious_threshold:
        # Something doing something out of the ordinary.
        score = 2
    else:
        # Probably bad, or at least not compliant with company policy.
        score = 3
    return {
        "Vendor": "Awake Security",
        "Type": indicator_type,
        "Indicator": lookup_key,
        "Score": score,
    }
''' COMMANDS '''
def lookup(lookup_type, lookup_key):
    """POST a /lookup/<type> request for `lookup_key` and return the parsed
    JSON response.  Mutates the shared module-level `request` body and may
    default `args["lookback_minutes"]` as a side effect.
    """
    path = "/lookup/" + lookup_type
    request["lookup_key"] = lookup_key
    # default value of lookback_minutes is 480
    if "lookback_minutes" not in args:
        args["lookback_minutes"] = 480
    request["lookback_minutes"] = int(args["lookback_minutes"])
    response = requests.post(prefix + path, json=request, headers=headers, verify=verify)
    # Treat any non-2xx status as a hard failure (return_error exits).
    if response.status_code < 200 or response.status_code >= 300:
        return_error('Request Failed.\nStatus code: {} with body {} with headers {}'.format(
            str(response.status_code),
            response.content,
            str(response.headers))
        )
    return response.json()
def lookupDevice():
    """`device` command: look up a device and post results with a DBot
    score derived from the device's notability score."""
    device = args["device"]
    contents = lookup("device", device)
    columns = [
        "deviceScore",
        "deviceName",
        "deviceType",
        "os",
        "osVersion",
        "commonEmail",
        "commonUsername",
        "tags",
        "recentIP",
        "activeIP",
        "nSimilarDevices",
        "ipCount",
        "applicationCount",
        # "protocols",
        "firstSeen",
        "lastSeen",
    ]
    if "deviceScore" in contents:
        dbotScore = toDBotScore("device", contents["deviceScore"], device)
    else:
        # No score available -> report "unknown" (0) to DBot.
        dbotScore = {
            "Vendor": "Awake Security",
            "Type": "device",
            "Indicator": device,
            "Score": 0,
        }
    humanReadable = displayTable([contents], columns)
    contents["device"] = device
    returnResults(contents, "Devices", "device", humanReadable, dbotScore)
def lookupDomain():
    """`domain` command: look up a domain and post results with a DBot
    score derived from the domain's notability percentile."""
    domain = args["domain"]
    contents = lookup("domain", domain)
    columns = [
        "notability",
        "isAlexaTopOneMillion",
        "isDGA",
        "intelSources",
        "numAssociatedDevices",
        "numAssociatedActivities",
        "approxBytesTransferred",
        "protocols",
        "firstSeen",
        "lastSeen",
    ]
    if "notability" in contents:
        dbotScore = toDBotScore("domain", contents["notability"], domain)
    else:
        # No score available -> report "unknown" (0) to DBot.
        dbotScore = {
            "Vendor": "Awake Security",
            "Type": "domain",
            "Indicator": domain,
            "Score": 0,
        }
    humanReadable = displayTable([contents], columns)
    contents["domain"] = domain
    returnResults(contents, "Domains", "domain", humanReadable, dbotScore)
def lookupEmail():
    """`email` command: look up an email address; the API returns a list
    with one row per associated device."""
    email = args["email"]
    contents = lookup("email", email)
    columns = [
        "notabilityPercentile",
        "deviceName",
        "os",
        "deviceType",
        "application",
        "numberSimilarDevices",
        "numberSessions",
        "firstSeen",
        "lastSeen",
        "duration",
        "deviceId",
    ]
    # NOTE(review): `contents` is a list here, so this membership test is
    # against the list elements, not dict keys -- it looks like it almost
    # always falls through to the score-0 branch. Preserved as-is; confirm
    # intended behavior before changing.
    if "notabilityPercentile" in contents:
        dbotScore = toDBotScore("email", contents["notabilityPercentile"], email)
    else:
        dbotScore = {
            "Vendor": "Awake Security",
            "Type": "email",
            "Indicator": email,
            "Score": 0,
        }
    humanReadable = displayTable(contents, columns)
    for row in contents:
        row["email"] = email
    returnResults(contents, "Emails", "email", humanReadable, dbotScore)
def lookupIp():
    """`ip` command: look up an IP address.

    Awake scores devices rather than IP addresses, so the DBot score for
    an IP indicator is always 0 (unknown).
    """
    ip = args["ip"]
    contents = lookup("ip", ip)
    columns = [
        "deviceCount",
        "activityCount",
        "ipFirstSeen",
        "ipLastSeen",
    ]
    dbotScore = {
        "Vendor": "Awake Security",
        "Type": "ip",
        "Indicator": ip,
        "Score": 0,
    }
    humanReadable = displayTable([contents], columns)
    contents["ip"] = ip
    returnResults(contents, "IPs", "ip", humanReadable, dbotScore)
def query(lookup_type):
    """Assemble a QL query expression from command arguments, POST it to
    /query/<lookup_type>, and return (expression, parsed JSON response).

    Convenience arguments (ipAddress, deviceName, ...) are AND-ed onto
    any explicit queryExpression the caller supplied.
    """
    # Default to an empty query if unset
    request["queryExpression"] = ""
    slurp(["queryExpression", "startTime", "endTime"])
    clauseTemplates = [
        ("ipAddress", "device.ip == {}"),
        ("deviceName", "device.name like r/{}/"),
        ("domainName", "domain.name like r/{}/"),
        ("protocol", "activity.protocol == \"{}\""),
        ("tags", "\"{}\" in device.tags"),
    ]
    for name, template in clauseTemplates:
        if name not in args:
            continue
        clause = template.format(args[name])
        if request.get("queryExpression"):
            request["queryExpression"] = request["queryExpression"] + " && " + clause
        else:
            request["queryExpression"] = clause
    path = "/query/" + lookup_type
    response = requests.post(prefix + path, json=request, headers=headers, verify=verify)
    if response.status_code < 200 or response.status_code >= 300:
        return_error('Request Failed.\nStatus code: {} with body {} with headers {}'.format(
            str(response.status_code),
            response.content,
            str(response.headers))
        )
    contents = response.json()
    return request["queryExpression"], contents
def queryActivities():
    """awake-query-activities command: list network activities matching
    the assembled query expression."""
    q, contents = query("activities")
    columns = [
        "sourceIP",
        "sourceHost",
        "sourcePort",
        "destIP",
        "destHost",
        "destPort",
        "activityDeviceName",
        "activityStart",
        "activityEnd",
        "protocols",
    ]
    humanReadable = displayTable(contents, columns)
    # Attach the query so playbooks can correlate results with their input.
    for row in contents:
        row["query"] = q
    returnResults(contents, "Activities", "query", humanReadable, None)
def queryDevices():
    """awake-query-devices command: list devices matching the assembled
    query expression."""
    q, contents = query("devices")
    columns = [
        "notabilityPercentile",
        "deviceName",
        "os",
        "deviceType",
        "application",
        "numberSimilarDevices",
        "numberSessions",
        "firstSeen",
        "lastSeen",
        "duration",
        "deviceId",
    ]
    humanReadable = displayTable(contents, columns)
    # Attach the query so playbooks can correlate results with their input.
    for row in contents:
        row["query"] = q
    returnResults(contents, "Devices", "query", humanReadable, None)
def queryDomains():
    """awake-query-domains command: list domains matching the assembled
    query expression."""
    q, contents = query("domains")
    columns = [
        "name",
        "notability",
        "created",
        "lastUpdated",
        "expiration",
        "registrantOrg",
        "registrantCountry",
        "registrarName",
        "nameservers",
        "deviceCount",
        "intelCount",
        "lastSeen",
    ]
    humanReadable = displayTable(contents, columns)
    # Attach the query so playbooks can correlate results with their input.
    for row in contents:
        row["query"] = q
    returnResults(contents, "Domains", "query", humanReadable, None)
def pcapDownload():
    """awake-pcap-download command: fetch a PCAP for the described session
    and return it to the war room as a file entry.

    Builds a single session filter from the command arguments, POSTs it to
    /pcap/download, and base64-decodes the returned capture.
    """
    slurp(["monitoringPointID"])
    session = {}
    # String-valued session fields are copied through verbatim.
    for field in ["hostA", "hostB", "startTimeRFC3339Nano", "endTimeRFC3339Nano"]:
        if field in args:
            session[field] = args[field]
    # BUGFIX: these conditions previously tested for the *RFC3339Nano keys
    # while reading args["startTime"]/args["endTime"], raising KeyError (or
    # never firing) instead of mapping the startTime/endTime command
    # arguments onto the API's RFC3339 field names.
    if "startTime" in args:
        session["startTimeRFC3339Nano"] = args["startTime"]
    if "endTime" in args:
        session["endTimeRFC3339Nano"] = args["endTime"]
    # Numeric fields must be sent to the API as integers.
    for field in ["protocol", "portA", "portB"]:
        if field in args:
            session[field] = int(args[field])
    request["sessions"] = [session]
    path = "/pcap/download"
    response = requests.post(prefix + path, json=request, headers=headers, verify=verify)
    if response.status_code < 200 or response.status_code >= 300:
        return_error('Request Failed.\nStatus code: {} with body {} with headers {}'.format(
            str(response.status_code),
            response.content,
            str(response.headers))
        )
    # The capture comes back base64-encoded in the JSON body.
    b64 = response.json()["pcap"]
    pcap_bytes = base64.b64decode(b64)
    demisto.results(fileResult("download.pcap", pcap_bytes))
def fetchIncidents():
    """Poll the threat-behavior matches endpoint and create one incident
    per matching behavior.

    Keeps a low-water-mark timestamp in demisto's lastRun state and only
    queries the API once at least `fetch_interval` minutes have elapsed
    since the last successful match.
    """
    threatBehaviorsString = params.get("threat_behaviors") or ""
    threatBehaviors = [threatBehavior.strip() for threatBehavior in threatBehaviorsString.split(",")]
    # "".split(",") yields [""] -- normalize that to "no behaviors".
    if threatBehaviors == [""]:
        threatBehaviors = []
    lastRun = demisto.getLastRun()
    formatString = "%Y-%m-%d %H:%M:%S+0000"
    earlyTimeString = "1970-01-01 00:00:00+0000"
    # First run (no saved state) starts from the epoch.
    startTimeString = lastRun.get("time") or earlyTimeString
    startTime = datetime.strptime(startTimeString, formatString)
    endTime = datetime.utcnow()
    endTimeString = datetime.strftime(endTime, formatString)
    # Skip the API call entirely until a full fetch interval has elapsed.
    if timedelta(minutes=int(params['fetch_interval'])) <= endTime - startTime:
        jsonRequest = {
            "startTime": startTimeString,
            "endTime": endTimeString,
            "threatBehaviors": threatBehaviors
        }
        response = requests.post(prefix + "/threat-behavior/matches", json=jsonRequest, headers=headers, verify=verify)
        jsonResponse = response.json()
        matchingThreatBehaviors = jsonResponse.get("matchingThreatBehaviors", [])
        def toIncident(matchingThreatBehavior):
            # Currently the threat behavior API doesn't allow us to retrieve metadata for
            # the behaviors that matched, which is why this incident record is mostly empty
            #
            # However, we can provide the original query that the threat behavior corresponded
            # to plus the date range so that a playbook can feed them back into
            # `awake-query-{devices,activities}` to retrieving the matching devices or
            # activities that triggered the match to the threat behavior.
            return {
                "Name": matchingThreatBehavior["name"],
                "Query": matchingThreatBehavior["query"],
                "StartTime": startTimeString,
                "EndTime": endTimeString,
            }
        demisto.incidents(map(toIncident, matchingThreatBehaviors))
        # Don't increase the low-water-mark until we actually find incidents
        #
        # This is a precaution because incidents sometimes appear in an old time
        # bucket after a delay
        if 0 < len(matchingThreatBehaviors):
            lastRun = {"time": endTimeString}
    else:
        demisto.incidents([])
    demisto.setLastRun(lastRun)
''' EXECUTION '''
# Top-level command dispatcher: demisto runs this script once per command.
LOG('command is %s' % (command))
try:
    if command == "test-module":
        # If we got this far we already successfully authenticated against the server
        demisto.results('ok')
    elif command == "fetch-incidents":
        fetchIncidents()
    elif command == "awake-query-devices":
        queryDevices()
    elif command == "awake-query-activities":
        queryActivities()
    elif command == "awake-query-domains":
        queryDomains()
    elif command == "awake-pcap-download":
        pcapDownload()
    elif command == "domain":
        lookupDomain()
    elif command == "email":
        lookupEmail()
    elif command == "ip":
        lookupIp()
    elif command == "device":
        lookupDevice()
except Exception, e:
    # Python 2 except syntax; this integration script is py2-only
    # (see e.message below, which does not exist on py3 exceptions).
    if command == "fetch-incidents":
        # Re-raise so the scheduler records the fetch failure.
        raise
    LOG(e)
    LOG.print_log()
    return_error(e.message)
| [
"noreply@github.com"
] | cloudshark.noreply@github.com |
31b8b63187088346a00a66b38ced5e627ee5a0d9 | e26e41625d7695ba655f833fee2a3ea3a85ffce3 | /SPModule/mysite/dashboard/admin.py | 847aa530c9f411e061bcd1eae8e384e65bf22862 | [] | no_license | RyanAquino/m0rbi-aliquam | ff699aa07527d83099ea7bff7c5bda9ba30ab91f | 6fde427ccc4e4d8f4c9ef43dcf351b0e78cdbae9 | refs/heads/master | 2021-01-20T00:50:39.824979 | 2017-05-22T07:23:07 | 2017-05-22T07:23:07 | 89,195,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 80 | py | from django.contrib import admin
from .models import Sp
admin.site.register(Sp) | [
"dnvsayco@gmail.com"
] | dnvsayco@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.