Tutorialspoint

euler

# Euler method in Python: solve dy/dt = -2*t*y + 2*t*exp(-t**2), y(0) = 0

from math import exp

h=0.1
t0=0
y0=0
tmax=0.5
t=t0
y=y0

print "Euler method\n"
while t <= tmax + 1e-9:   # small tolerance so the step at t = tmax is not lost to float rounding
    print t,'-> ',y
    k1=-2*t*y+2*t*exp(-t**2)
    y = y + h*k1
    t = t + h
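
For reference, this ODE has the exact solution y(t) = t**2 * exp(-t**2) (found with the integrating factor exp(t**2)), so the Euler output can be checked against it. A minimal sketch in the same Python 2 style as the snippet above:

from math import exp

# Same Euler loop, but also printing the exact solution y(t) = t**2 * exp(-t**2)
h = 0.1
t, y = 0.0, 0.0
while t <= 0.5 + 1e-9:
    print t, '-> ', y, ' exact:', t**2*exp(-t**2)
    k1 = -2*t*y + 2*t*exp(-t**2)
    y = y + h*k1
    t = t + h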

IncySpider

import random
import numpy as np

def dice(): #Roll the dice!
    return random.randint(1,6)

def rain(prob): #Down came the rain and washed the spider out?
    r = random.random()
    keep = False
    if r< prob:
        keep = True
    return keep

def onegame(prain,length): #Play one game until you win, note moves taken
    n = 0
    i = 0
    while (n<length):
        i=i+1
        n = n + dice()
        if rain(prain):
            n=0
    return i

def testlength(NMAX,length,prain): #Repeat game to accumulate statistics
    g = list()
    for x in range(NMAX):
        goes = onegame(prain,length)
        g.append(goes)
    r = np.asarray(g)
    return r

def cumulative(y,p):    #After how many goes is cumulative prob > p?
    c = 0
    i = 0
    while (c<p): #Loop over histogram
        i = i + 1
        c = c + y[i] #cumulative probability
    return i 

def plotresults(rs):    #Summarise the results (mean and cumulative statistics)
    print 'Average goes/game = {0}'.format(np.mean(rs))
    bins = np.arange(rs.max()+1)   #Find out what the longest game was
    h = (np.histogram(rs, bins, density=True))  #Bin data
    x = h[1][:] #The bin edges are the second element of the returned tuple
    y = h[0][:] #The bin probabilities (densities) are the first element
    cp5 = cumulative(y,0.5) #after how many has cumul.P exceeded 0.5?
    print 'Half of games take less than {0} goes'.format(cp5)
    cp95 = cumulative(y,0.95) #after how many goes has cumulative probability exceeded 0.95?
    print '95% of games are complete after {0} goes'.format(cp95)
    return y

rs1 = testlength(10000,10,0.4) #Incy Game
rs2 = testlength(10000,350,0.0) #Ludo (no rain!)
y1 = plotresults(rs1)
y2 = plotresults(rs2)
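
plotresults only prints statistics despite its name; if matplotlib is available (an assumption, it is not imported above), the probability arrays it returns can be plotted directly:

import matplotlib.pyplot as plt

# y1 and y2 hold the probability of a game taking exactly i goes
plt.plot(y1, label='Incy game (length 10, p_rain 0.4)')
plt.plot(y2, label='Ludo-style game (length 350, no rain)')
plt.xlabel('goes per game')
plt.ylabel('probability')
plt.legend()
plt.show()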

pima-indians-diabetes.data.csv

# Example of Naive Bayes implemented from Scratch in Python
import csv
import random
import math

def loadCsv(filename):
	lines = csv.reader(open(filename, "rb"))
	dataset = list(lines)
	for i in range(len(dataset)):
		dataset[i] = [float(x) for x in dataset[i]]
	return dataset

def splitDataset(dataset, splitRatio):
	trainSize = int(len(dataset) * splitRatio)
	trainSet = []
	copy = list(dataset)
	while len(trainSet) < trainSize:
		index = random.randrange(len(copy))
		trainSet.append(copy.pop(index))
	return [trainSet, copy]

def separateByClass(dataset):
	separated = {}
	for i in range(len(dataset)):
		vector = dataset[i]
		if (vector[-1] not in separated):
			separated[vector[-1]] = []
		separated[vector[-1]].append(vector)
	return separated

def mean(numbers):
	return sum(numbers)/float(len(numbers))

def stdev(numbers):
	avg = mean(numbers)
	variance = sum([pow(x-avg,2) for x in numbers])/float(len(numbers)-1)
	return math.sqrt(variance)

def summarize(dataset):
	summaries = [(mean(attribute), stdev(attribute)) for attribute in zip(*dataset)]
	del summaries[-1]
	return summaries

def summarizeByClass(dataset):
	separated = separateByClass(dataset)
	summaries = {}
	for classValue, instances in separated.iteritems():
		summaries[classValue] = summarize(instances)
	return summaries

def calculateProbability(x, mean, stdev):
	exponent = math.exp(-(math.pow(x-mean,2)/(2*math.pow(stdev,2))))
	return (1 / (math.sqrt(2*math.pi) * stdev)) * exponent

def calculateClassProbabilities(summaries, inputVector):
	probabilities = {}
	for classValue, classSummaries in summaries.iteritems():
		probabilities[classValue] = 1
		for i in range(len(classSummaries)):
			mean, stdev = classSummaries[i]
			x = inputVector[i]
			probabilities[classValue] *= calculateProbability(x, mean, stdev)
	return probabilities
			
def predict(summaries, inputVector):
	probabilities = calculateClassProbabilities(summaries, inputVector)
	bestLabel, bestProb = None, -1
	for classValue, probability in probabilities.iteritems():
		if bestLabel is None or probability > bestProb:
			bestProb = probability
			bestLabel = classValue
	return bestLabel

def getPredictions(summaries, testSet):
	predictions = []
	for i in range(len(testSet)):
		result = predict(summaries, testSet[i])
		predictions.append(result)
	return predictions

def getAccuracy(testSet, predictions):
	correct = 0
	for i in range(len(testSet)):
		if testSet[i][-1] == predictions[i]:
			correct += 1
	return (correct/float(len(testSet))) * 100.0

def main():
	filename = 'pima-indians-diabetes.data.csv'
	splitRatio = 0.67
	dataset = loadCsv(filename)
	trainingSet, testSet = splitDataset(dataset, splitRatio)
	print('Split {0} rows into train={1} and test={2} rows'.format(len(dataset), len(trainingSet), len(testSet)))
	# prepare model
	summaries = summarizeByClass(trainingSet)
	# test model
	predictions = getPredictions(summaries, testSet)
	accuracy = getAccuracy(testSet, predictions)
	print('Accuracy: {0}%'.format(accuracy))

main()
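
As a quick illustration (not part of the original script), predict() can also be called on a single feature vector once the summaries are built; the eight attribute values below are purely illustrative numbers in the dataset's usual column order (pregnancies, glucose, blood pressure, skin thickness, insulin, BMI, pedigree, age):

# Illustrative only: train on the full file and classify one made-up record
dataset = loadCsv('pima-indians-diabetes.data.csv')
summaries = summarizeByClass(dataset)
sample = [2.0, 120.0, 70.0, 20.0, 79.0, 32.0, 0.47, 33.0]   # hypothetical patient
print('Predicted class: {0}'.format(predict(summaries, sample)))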

asdfgh

from xlwt import Workbook
import xlwt
book = Workbook()
sheet1 = book.add_sheet('Sheet 1')
book.add_sheet('Sheet 2')
for i in range(0, 100):
    st = xlwt.easyxf('pattern: pattern solid;')
    st.pattern.pattern_fore_colour = i   # palette colour index; values beyond Excel's standard 0-63 palette may not map to distinct colours
    sheet1.write(i % 24, i // 24, 'Test text', st)   # fill 24 rows per column
book.save(r"D:\simple.xls")   # raw string so the backslash is not treated as an escape

https://github.com/kjahan/k-means/blob/master/clustering.py

import random as rand
import math as math
from point import Point
#import pkg_resources
#pkg_resources.require("matplotlib")
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt

class clustering:
    def __init__(self, geo_locs_, k_):
        self.geo_locations = geo_locs_
        self.k = k_
        self.clusters = []  #clusters of nodes
        self.means = []     #means of clusters
        self.debug = False  #debug flag
    #this method returns the next random node
    def next_random(self, index, points, clusters):
        #pick next node that has the maximum distance from other nodes
        dist = {}
        for point_1 in points:
            if self.debug:
                print 'point_1: %f %f' % (point_1.latit, point_1.longit) 
            #compute this node distance from all other points in cluster
            for cluster in clusters.values():
                point_2 = cluster[0]
                if self.debug:
                    print 'point_2: %f %f' % (point_2.latit, point_2.longit)
                if point_1 not in dist:
                    dist[point_1] = math.sqrt(math.pow(point_1.latit - point_2.latit,2.0) + math.pow(point_1.longit - point_2.longit,2.0))       
                else:
                    dist[point_1] += math.sqrt(math.pow(point_1.latit - point_2.latit,2.0) + math.pow(point_1.longit - point_2.longit,2.0))
        if self.debug:
            for key, value in dist.items():
                print "(%f, %f) ==> %f" % (key.latit,key.longit,value)
        #now let's return the point that has the maximum distance from previous nodes
        count_ = 0
        max_ = 0
        for key, value in dist.items():
            if count_ == 0:
                max_ = value
                max_point = key
                count_ += 1
            else:
                if value > max_:
                    max_ = value
                    max_point = key
        return max_point
    #this method computes the initial means
    def initial_means(self, points):
        #pick the first node at random
        point_ = rand.choice(points)
        if self.debug:
            print 'point#0: %f %f' % (point_.latit, point_.longit)
        clusters = dict()
        clusters.setdefault(0, []).append(point_)
        points.remove(point_)
        #now let's pick k-1 more random points
        for i in range(1, self.k):
            point_ = self.next_random(i, points, clusters)
            if self.debug:
                print 'point#%d: %f %f' % (i, point_.latit, point_.longit)
            #clusters.append([point_])
            clusters.setdefault(i, []).append(point_)
            points.remove(point_)
        #compute mean of clusters
        #self.print_clusters(clusters)
        self.means = self.compute_mean(clusters)
        if self.debug:
            print "initial means:"
            self.print_means(self.means)
    def compute_mean(self, clusters):
        means = []
        for cluster in clusters.values():
            mean_point = Point(0.0, 0.0)
            cnt = 0.0
            for point in cluster:
                #print "compute: point(%f,%f)" % (point.latit, point.longit)
                mean_point.latit += point.latit
                mean_point.longit += point.longit
                cnt += 1.0
            mean_point.latit = mean_point.latit/cnt
            mean_point.longit = mean_point.longit/cnt
            means.append(mean_point)
        return means
    #this method assigns each node to the cluster with the closest mean
    def assign_points(self, points):
        if self.debug:
            print "assign points"
        clusters = dict()
        for point in points:
            dist = []
            if self.debug:
                print "point(%f,%f)" % (point.latit, point.longit)
            #find the best cluster for this node
            for mean in self.means:
                dist.append(math.sqrt(math.pow(point.latit - mean.latit,2.0) + math.pow(point.longit - mean.longit,2.0)))
            #let's find the smallest mean
            if self.debug:
                print dist
            cnt_ = 0
            index = 0
            min_ = dist[0]
            for d in dist:
                if d < min_:
                    min_ = d
                    index = cnt_
                cnt_ += 1
            if self.debug:
                print "index: %d" % index
            clusters.setdefault(index, []).append(point)
        return clusters
    def update_means(self, means, threshold):
        #check the current mean with the previous one to see if we should stop
        for i in range(len(self.means)):
            mean_1 = self.means[i]
            mean_2 = means[i]
            if self.debug:
                print "mean_1(%f,%f)" % (mean_1.latit, mean_1.longit)
                print "mean_2(%f,%f)" % (mean_2.latit, mean_2.longit)            
            if math.sqrt(math.pow(mean_1.latit - mean_2.latit,2.0) + math.pow(mean_1.longit - mean_2.longit,2.0)) > threshold:
                return False
        return True
    #debug function: print cluster points
    def print_clusters(self, clusters):
        cluster_cnt = 1
        for cluster in clusters.values():
            print "nodes in cluster #%d" % cluster_cnt
            cluster_cnt += 1
            for point in cluster:
                print "point(%f,%f)" % (point.latit, point.longit)
    #print means
    def print_means(self, means):
        for point in means:
            print "%f %f" % (point.latit, point.longit)
    #k_means algorithm
    def k_means(self, plot_flag):
        if len(self.geo_locations) < self.k:
            return -1   #error
        points_ = [point for point in self.geo_locations]
        #compute the initial means
        self.initial_means(points_)
        stop = False
        while not stop:
            #assignment step: assign each node to the cluster with the closest mean
            points_ = [point for point in self.geo_locations]
            clusters = self.assign_points(points_)
            if self.debug:
                self.print_clusters(clusters)
            means = self.compute_mean(clusters)
            if self.debug:
                print "means:"
                self.print_means(means)
                print "update mean:"
            stop = self.update_means(means, 0.01)
            if not stop:
                self.means = []
                self.means = means
        self.clusters = clusters
        #plot clusters for evaluation
        if plot_flag:
            fig = plt.figure()
            ax = fig.add_subplot(111)
            markers = ['o', 'd', 'x', 'h', 'H', 7, 4, 5, 6, '8', 'p', ',', '+', '.', 's', '*', 3, 0, 1, 2]
            colors = ['r', 'k', 'b', [0,0,0], [0,0,1], [0,1,0], [0,1,1], [1,0,0], [1,0,1], [1,1,0], [1,1,1]]
            cnt = 0
            for cluster in clusters.values():
                latits = []
                longits = []
                for point in cluster:
                    latits.append(point.latit)
                    longits.append(point.longit)
                ax.scatter(longits, latits, s=60, c=colors[cnt], marker=markers[cnt])
                cnt += 1
            plt.show()
        return 0
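
A minimal driver sketch (not part of the original file), assuming the point module from the same repository provides a Point(latit, longit) class as the code above expects:

# Hypothetical usage: cluster 50 random points into k = 3 groups without plotting
geo_locs = [Point(rand.uniform(-90, 90), rand.uniform(-180, 180)) for _ in range(50)]
cl = clustering(geo_locs, 3)
cl.k_means(False)
for idx, cluster in cl.clusters.items():
    print "cluster %d has %d points" % (idx, len(cluster))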

Execute Python Online

# Hello World program in Python
    
print "Hello World!\n"
print "DDDaaaavvveee"

evolucion ejercicio estoca

import numpy as np
from numpy import matrix
from numpy import linalg

c = matrix([ [1,0,0],[0,2.5,-0.5],[0,-0.5,2.5] ])

n = 10 # number of realizations

mu = 0
sigma = 1

# each column of z is one realization of a standard normal 3-vector
z = matrix(np.random.normal(mu,sigma,(3,n)))

print(z)

autovalores, autovectores = linalg.eig(c)

p = autovectores
print("\nthese are the eigenvectors\n")
print(p)

print("\nthese are the eigenvalues\n")
print(autovalores)

d = np.diag(autovalores)

# take the square root of each eigenvalue to build D^(1/2)
for i in range(0,3):
    autovalores[i] = np.sqrt(autovalores[i])

dRaiz = np.diag(autovalores)
print("\nthis is D^(1/2)\n")
print(dRaiz)

print("test matrix: P*D*P^T should reproduce c")
print(p*d*(p.transpose()))

# x = P * D^(1/2) * z has covariance P*D*P^T = c
a = p*dRaiz

print(a)

x = a*z

print(x)

estimacionC = np.cov(x) # sample covariance estimate of c (rows are variables)

print(estimacionC)
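
With only n = 10 realizations np.cov gives a noisy estimate; a quick sanity check (a sketch reusing the names above) is to redo the estimate with many more realizations and compare it against c:

# Sanity check: the sample covariance should approach c as the number of realizations grows
z_big = matrix(np.random.normal(mu, sigma, (3, 10000)))
x_big = a * z_big
print(np.cov(x_big))   # should be close to c
print(c)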

isp

# Hello World program in Python
    
print "Hello World!\n"

Execute Python Online

programa = "while(true){if(a+b==0){print(a);}}"

def filter(instr):   # keep only the curly brackets of the input program (note: shadows the built-in filter)
    tamanio = len(instr)
    corchetes = ""
    for x in range(0, tamanio):
        if instr[x]=="{" or instr[x]=="}":
            corchetes= corchetes + instr[x]
    return corchetes
    
print filter(programa)

def delete(instr):   # remove one '{' and the first '}' after it; return the brackets unchanged if no such pair exists
    aux = filter(instr)
    tamanio = len(aux)
    borrado = ""
    primero = False
    encontro = False
    colocar = True
    borro = False
    
    for x in range(0, tamanio):
        if aux[x]=="{" and primero==False and borro==False:
            primero=True
            colocar=False
        if aux[x]=="}" and primero==True and borro==False:
            encontro=True
            colocar=False
            borro=True
        if colocar:
            borrado = borrado + aux[x]
        colocar = True
    if encontro:
        return borrado
    else:
        return aux
   
print delete(programa)
print delete("{{}{}}")
print delete("}{{")

def recursiva_del(instr):   # repeat delete() until nothing changes; an empty result means the brackets are balanced
    retorna = delete(instr)
    if instr == retorna:
        if instr == "":
            return "The checked program has correct bracket syntax"
        else:
            return "The checked program has incorrect bracket syntax"
    else:
        return recursiva_del(retorna)

print recursiva_del(programa)
    
