def _(D = input_box([1,2,3,5,6,8,9,11,12,14],
                    label="[Domain] :", width=60),
      Probs = input_box([1/20,1/20,1/20,3/20,1/20,4/20,4/20,1/20,1/20,3/20],
                        label=" $$[f(x)] :$$", width=60),
      n_samples = slider(100, 10000, 100, 100, label="$$ \\text{# of samples:}$$")):
print("f(x) values do not sum to 1")
variancef += D[k]^2*Probs[k]
G += line([(D[k],0),(D[k],Probs[k])],color='green')
variancef = variancef - meanf^2
G += points(f,color='blue',size=50)
G += point((meanf,0),color='yellow',size=60,zorder=3)
G += line([(meanf-sd,0),(meanf+sd,0)],color='red',thickness=5)
g = DiscreteProbabilitySpace(D,Probs)
print(" mean = %s"%str(meanf))
print(" variance = %s"%str(variancef))
print("Domain and Probabilities Probs must be lists of the same size")
counts = [0] * len(Probs)
X = GeneralDiscreteDistribution(Probs)
for _ in range(n_samples):
elem = X.get_random_element()
Empirical = [1.0*x/n_samples for x in counts]
samplemean = numpy.mean(sample)
samplevariance = numpy.var(sample)
sampdev = sqrt(samplevariance)
E = points(zip(D,Empirical),color='orange',size=40)
E += point((samplemean,0.005),color='brown',size=60,zorder=3)
E += line([(samplemean-sampdev,0.005),(samplemean+sampdev,0.005)],
color='orange',thickness=5)
(G+E).show(ymin=0,figsize=(5,4))
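
# A minimal non-interactive sanity check (a sketch, separate from the interact above):
# it recomputes the closed-form mean and variance for the default Domain/Probs and
# compares them with estimates from GeneralDiscreteDistribution, the same sampler the
# interact uses. The names D0, P0, Y, and draws are illustrative only.
D0 = [1,2,3,5,6,8,9,11,12,14]
P0 = [1/20,1/20,1/20,3/20,1/20,4/20,4/20,1/20,1/20,3/20]
mean0 = sum(d*p for d, p in zip(D0, P0))              # E[X]
var0  = sum(d^2*p for d, p in zip(D0, P0)) - mean0^2  # E[X^2] - E[X]^2
Y = GeneralDiscreteDistribution(P0)
draws = [D0[Y.get_random_element()] for _ in range(5000)]
import numpy
print(mean0, numpy.mean(draws))  # sample mean should be close to E[X]
print(var0, numpy.var(draws))    # sample variance should be close to Var(X)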