import matplotlib.pyplot as plt
import ebayesthresh_torch
import torch
Import
# ref
torch.set_printoptions(precision=15)
# import for code check
from scipy.stats import norm
from scipy.optimize import minimize
import numpy as np
beta.cauchy
Function beta for the quasi-Cauchy prior
- Description
Given a value or vector x of values, find the value(s) of the function \(\beta(x) = g(x)/\phi(x) − 1\), where \(g\) is the convolution of the quasi-Cauchy with the normal density \(\phi(x)\).
x가 입력되면 코시 분포와 정규 분포를 혼합해서 함수 베타 구하기
x = torch.tensor([-2.0,1.0,0.0,-4.0,8.0,50.0],dtype=torch.float64)
x
tensor([-2., 1., 0., -4., 8., 50.], dtype=torch.float64)
x = x.clone().detach().type(torch.float64)
x
tensor([-2., 1., 0., -4., 8., 50.], dtype=torch.float64)
phix = torch.tensor(norm.pdf(x, loc=0, scale=1))
phix
tensor([5.399096651318805e-02, 2.419707245191434e-01, 3.989422804014327e-01,
1.338302257648853e-04, 5.052271083536893e-15, 0.000000000000000e+00],
dtype=torch.float64)
j = (x != 0)
j
tensor([ True, True, False, True, True, True])
beta = x.clone()
beta
tensor([-2., 1., 0., -4., 8., 50.], dtype=torch.float64)
beta = torch.where(j == False, -1/2, beta)
beta
tensor([-2.000000000000000, 1.000000000000000, -0.500000000000000,
-4.000000000000000, 8.000000000000000, 50.000000000000000],
dtype=torch.float64)
beta[j] = (torch.tensor(norm.pdf(0, loc=0, scale=1)) / phix[j] - 1) / (x[j] ** 2) - 1
beta
tensor([ 5.972640247326628e-01, -3.512787292998718e-01, -5.000000000000000e-01,
1.852473741901080e+02, 1.233796252853370e+12, inf],
dtype=torch.float64)
beta
tensor([ 5.972640247326628e-01, -3.512787292998718e-01, -5.000000000000000e-01,
1.852473741901080e+02, 1.233796252853370e+12, inf],
dtype=torch.float64)
- R code
beta.cauchy <- function(x) {
#
# Find the function beta for the mixed normal prior with Cauchy
# tails. It is assumed that the noise variance is equal to one.
#
phix <- dnorm(x)
j <- (x != 0)
beta <- x
beta[!j] <- -1/2
beta[j] <- (dnorm(0)/phix[j] - 1)/x[j]^2 - 1
return(beta)
}
결과
- Python
ebayesthresh_torch.beta_cauchy(torch.tensor([-2,1,0,-4,8,50]))
tensor([ 5.972640247326628e-01, -3.512787292998718e-01, -5.000000000000000e-01,
1.852473741901080e+02, 1.233796252853370e+12, inf],
dtype=torch.float64)
- R
> beta.cauchy(c(-2,1,0,-4,8,50))
[1] 5.972640e-01 -3.512787e-01 -5.000000e-01 1.852474e+02 1.233796e+12 Inf
beta.laplace
Function beta for the Laplace prior
- Description
Given a single value or a vector of \(x\) and \(s\), find the value(s) of the function \(\beta(x; s, a) = \frac{g(x; s, a)}{f_n(x; 0, s)}−1\), where \(f_n(x; 0, s)\) is the normal density with mean \(0\) and standard deviation \(s\), and \(g\) is the convolution of the Laplace density with scale parameter a, \(γa(\mu)\), with the normal density \(f_n(x; µ, s)\) with mean mu and standard deviation \(s\).
평균이 \(\mu\)이며, 스케일 파라메터 a를 가진 라플라스와 정규분포의 합성함수 \(g\)와 평균이 0이고 표준편차가s인 f로 계산되는 함수 베타
x = torch.tensor([-2,1,0,-4,8,50])
# x = torch.tensor([2.14])
# s = 1
s = torch.arange(1, 7)
a = 0.5
- s는 표준편차
- a는 Laplace prior 모수, 이 값이 클수록 분포 모양이 뾰족해진다.
x = torch.abs(x).type(torch.float64)
x
tensor([ 2., 1., 0., 4., 8., 50.], dtype=torch.float64)
xpa = x / s + s * a
xpa
tensor([ 2.500000000000000, 1.500000000000000, 1.500000000000000,
3.000000000000000, 4.100000000000000, 11.333333333333334],
dtype=torch.float64)
xma = x / s - s * a
xma
tensor([ 1.500000000000000, -0.500000000000000, -1.500000000000000,
-1.000000000000000, -0.900000000000000, 5.333333333333334],
dtype=torch.float64)
rat1 = 1 / xpa
rat1
tensor([0.400000000000000, 0.666666666666667, 0.666666666666667,
0.333333333333333, 0.243902439024390, 0.088235294117647],
dtype=torch.float64)
xpa_np = np.array(xpa.detach())
rat1[xpa < 35] = torch.tensor(norm.cdf(-xpa_np[xpa_np < 35], loc=0, scale=1) / norm.pdf(xpa_np[xpa_np < 35], loc=0, scale=1))
rat2 = 1 / torch.abs(xma)
rat2
tensor([0.666666666666667, 2.000000000000000, 0.666666666666667,
1.000000000000000, 1.111111111111111, 0.187500000000000],
dtype=torch.float64)
xma_np = np.array(xma.detach())
xma_np[xma_np > 35] = 35
rat2[xma > -35] = torch.tensor(norm.cdf(xma_np[xma_np > -35], loc=0, scale=1) / norm.pdf(xma_np[xma_np > -35], loc=0, scale=1))
beta = (a * s / 2) * (rat1 + rat2) - 1
beta
tensor([ 8.898520296511427e-01, -3.039099526641722e-01, -2.262765426730551e-01,
-3.973015887109854e-02, 1.539501234263774e-01, 5.646947329772137e+06],
dtype=torch.float64)
- R code
beta.laplace <- function(x, s = 1, a = 0.5) {
#
# The function beta for the Laplace prior given parameter a and s (sd)
#
x <- abs(x)
xpa <- x/s + s*a
xma <- x/s - s*a
rat1 <- 1/xpa
rat1[xpa < 35] <- pnorm( - xpa[xpa < 35])/dnorm(xpa[xpa < 35])
rat2 <- 1/abs(xma)
xma[xma > 35] <- 35
rat2[xma > -35] <- pnorm(xma[xma > -35])/dnorm(xma[xma > -35])
beta <- (a * s) / 2 * (rat1 + rat2) - 1
return(beta)
}
결과
- Python
ebayesthresh_torch.beta_laplace(torch.tensor([-2,1,0,-4,8,50]),s=1)
tensor([ 8.898520296511427e-01, -3.800417166060107e-01, -5.618177717731538e-01,
2.854594666723506e+02, 1.026980615772411e+12, 6.344539544172600e+265],
dtype=torch.float64)
ebayesthresh_torch.beta_laplace(torch.tensor([-2.0]),s=1,a=0.5)
tensor([0.889852029651143], dtype=torch.float64)
ebayesthresh_torch.beta_laplace(torch.tensor([-2,1,0,-4,8,50]),s=torch.arange(1, 7),a = 1)
tensor([ 8.908210552068985e-01, -1.299192504522433e-01, -8.622910386969129e-02,
-5.203193149163621e-03, 5.421371767485761e-02, 1.124935767773541e+02],
dtype=torch.float64)
- R
> beta.laplace(c(-2,1,0,-4,8,50), s=1)
[1] 8.898520e-01 -3.800417e-01 -5.618178e-01 2.854595e+02 1.026981e+12 6.344540e+265
> beta.laplace(-2, s=1, a=0.5)
[1] 0.889852
> beta.laplace(c(-2,1,0,-4,8,50), s=1:6, a=1)
[1] 0.890821055 -0.129919250 -0.086229104 -0.005203193 0.054213718 112.493576777
cauchy_medzero
the objective function that has to be zeroed, component by component, to find the posterior median when the quasi-Cauchy prior is used. x is the parameter vector, z is the data vector, w is the weight x and z may be scalars
- quasi-Cauchy prior에서 사후 중앙값 찾기 위한 함수
- x,z는 벡터일수도 있고, 스칼라일 수 도 있다.
x = torch.tensor([-2,1,0,-4,8,50])
# x = torch.tensor(4)
z = torch.tensor([1,0,2,3,-1,-1])
# z = torch.tensor(5)
w = torch.tensor(0.5)
hh = z - x
hh
tensor([ 3, -1, 2, 7, -9, -51])
dnhh = torch.tensor(norm.pdf(hh, loc=0, scale=1))
dnhh
tensor([4.431848411938008e-03, 2.419707245191434e-01, 5.399096651318805e-02,
9.134720408364595e-12, 1.027977357166892e-18, 0.000000000000000e+00],
dtype=torch.float64)
norm.cdf(hh, loc=0, scale=1)
array([9.98650102e-01, 1.58655254e-01, 9.77249868e-01, 1.00000000e+00,
1.12858841e-19, 0.00000000e+00])
yleft = torch.tensor(norm.cdf(hh, loc=0, scale=1)) - z * dnhh + ((z * x - 1) * dnhh * torch.tensor(norm.cdf(-x, loc=0, scale=1) )) / torch.tensor(norm.pdf(x, loc=0, scale=1))
yleft
tensor([7.535655913337148e-01, 0.000000000000000e+00, 8.016002934071383e-01,
9.999991126709799e-01, 1.644366208342579e-21, nan],
dtype=torch.float64)
yright2 = 1 + torch.exp(-z**2 / 2) * (z**2 * (1/w - 1) - 1)
yright2
tensor([1.000000000000000, 0.000000000000000, 1.406005859375000,
1.088871955871582, 1.000000000000000, 1.000000000000000])
yright2 / 2 - yleft
tensor([-0.253565591333715, 0.000000000000000, -0.098597363719638,
-0.455563134735189, 0.500000000000000, nan],
dtype=torch.float64)
- R코드
cauchy.medzero <- function(x, z, w) {
#
# the objective function that has to be zeroed, component by
# component, to find the posterior median when the quasi-Cauchy prior
# is used. x is the parameter vector, z is the data vector, w is the
# weight x and z may be scalars
#
hh <- z - x
dnhh <- dnorm(hh)
yleft <- pnorm(hh) - z * dnhh + ((z * x - 1) * dnhh * pnorm( - x))/
dnorm(x)
yright2 <- 1 + exp( - z^2/2) * (z^2 * (1/w - 1) - 1)
return(yright2/2 - yleft)
}
결과
- Python
- 벡터, 스칼라일때 가능한지 확인
ebayesthresh_torch.cauchy_medzero(torch.tensor([-2,1,0,-4,8,50]),torch.tensor([1,0,2,3,-1,-1]),0.5)
tensor([-0.253565591333715, 0.000000000000000, -0.098597363719638,
-0.455563134735189, 0.500000000000000, nan],
dtype=torch.float64)
ebayesthresh_torch.cauchy_medzero(torch.tensor(4),torch.tensor(5),torch.tensor(0.5))
tensor(-0.219442442491987, dtype=torch.float64)
- R
> cauchy.medzero(c(-2,1,0,-4,8,50),c(1,0,2,3,-1,-1),0.5)
[1] -0.25356559 0.00000000 -0.09859737 -0.45556313 0.50000000 NaN
> cauchy.medzero(4,5,0.5)
[1] -0.2194424
cauchy_threshzero
- cauchy 임계값 찾기 위한 것
- 아래에서 반환되는 y가 0에 가깝도록 만들어주는 z를 찾는 과정
z = torch.tensor([1,0,2,3,-1,-1])
# z = 0
# w = 0.5
w = torch.tensor([0.5,0.4,0.3,0.2,0,0.1])
y = (torch.tensor(norm.cdf(z, loc=0, scale=1)) - z * torch.tensor(norm.pdf(z, loc=0, scale=1)) - 0.5 - (z**2 * torch.exp(-z**2 / 2) * (1/w - 1)) / 2)
y
tensor([-0.203891311626260, 0.000000000000000, -0.262296682131538,
0.285392626219174, -inf, -2.828762020130332],
dtype=torch.float64)
- R 코드
cauchy.threshzero <- function(z, w) {
#
# The objective function that has to be zeroed to find the Cauchy
# threshold. z is the putative threshold vector, w is the weight w
# can be a vector
#
y <- pnorm(z) - z * dnorm(z) - 1/2 -
(z^2 * exp( - z^2/2) * (1/w - 1))/2
return(y)
}
결과
- Python
ebayesthresh_torch.cauchy_threshzero(torch.tensor([1,0,2,3,-1,-1]),0.5)
tensor([-0.203891311626260, 0.000000000000000, 0.098597372042885,
0.435364074104210, -0.402639354725059, -0.402639354725059],
dtype=torch.float64)
ebayesthresh_torch.cauchy_threshzero(torch.tensor([1,0,2,3,-1,-1]),torch.tensor([0.5,0.4,0.3,0.2,0,0.1]))
tensor([-0.203891311626260, 0.000000000000000, -0.262296682131538,
0.285392626219174, -inf, -2.828762020130332],
dtype=torch.float64)
- R
> cauchy.threshzero(c(1,0,2,3,-1,-1),0.5)
[1] -0.20389131 0.00000000 0.09859737 0.43536407 -0.40263935 -0.40263935
> cauchy.threshzero(c(1,0,2,3,-1,-1), c(0.5,0.4,0.3,0.2,0,0.1))
[1] -0.2038913 0.0000000 -0.2622967 0.2853926 -Inf -2.8287620
ebayesthresh
x = torch.normal(torch.tensor([0] * 90 + [5] * 10, dtype=torch.float32), torch.ones(100, dtype=torch.float32))
# sdev = None
sdev = torch.tensor(1.0,requires_grad=True)
prior="laplace"
a=torch.tensor(0.5,requires_grad=True)
bayesfac=False
verbose=False
threshrule="median"
universalthresh=True
stabadjustment=None
pr = prior[0:1]
pr
'l'
if sdev is None: sdev = torch.tensor([float('nan')])
else: sdev = torch.tensor(sdev*1.0,requires_grad=True)
UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).
else: sdev = torch.tensor(sdev*1.0,requires_grad=True)
if len([sdev.item()]) == 1:
    if stabadjustment is not None:
        raise ValueError("Argument stabadjustment is not applicable when variances are homogeneous.")
    if torch.isnan(sdev):
        sdev = ebayesthresh_torch.mad(x, center=0)
    stabadjustment_condition = True
else:
    if pr == "c":
        raise ValueError("Standard deviation has to be homogeneous for Cauchy prior.")
    if sdev.numel() != x.numel():
        raise ValueError("Standard deviation has to be homogeneous or have the same length as observations.")
    if stabadjustment is None:
        stabadjustment = False
    stabadjustment_condition = stabadjustment

if stabadjustment_condition:
    sdev = sdev.float()
    m_sdev = torch.mean(sdev)
    s = sdev / m_sdev
    x = x / m_sdev
else:
    s = sdev

if pr == "l" and a is None:
    pp = ebayesthresh_torch.wandafromx(x, s, universalthresh)
    w = pp["w"]
    a = pp["a"]
else:
    w = ebayesthresh_torch.wfromx(x, s, prior, a, universalthresh)

if pr != "m" or verbose:
    tt = ebayesthresh_torch.tfromw(w, s, prior=prior, bayesfac=bayesfac, a=a)[0]
    if stabadjustment_condition:
        tcor = tt * m_sdev
    else:
        tcor = tt

if threshrule == "median":
    muhat = ebayesthresh_torch.postmed(x, s, w, prior=prior, a=a)
elif threshrule == "mean":
    muhat = ebayesthresh_torch.postmean(x, s, w, prior=prior, a=a)
elif threshrule == "hard":
    muhat = ebayesthresh_torch.threshld(x, tt)
elif threshrule == "soft":
    muhat = ebayesthresh_torch.threshld(x, tt, hard=False)
elif threshrule == "none":
    muhat = None
else:
    raise ValueError(f"Unknown threshold rule: {threshrule}")
muhat
tensor([-0.000000000000000, -0.000000000000000, 0.000000000000000,
0.000000000000000, -0.000000000000000, 0.000000000000000,
-0.000000000000000, 0.000000000000000, 0.000000000000000,
-0.000000000000000, -0.000000000000000, -0.000000000000000,
0.000000000000000, -0.000000000000000, -0.000000000000000,
0.000000000000000, -0.000000000000000, 0.000000000000000,
0.000000000000000, -0.000000000000000, -0.000000000000000,
-0.000000000000000, -0.000000000000000, 0.000000000000000,
-0.000000000000000, -0.000000000000000, -0.000000000000000,
0.000000000000000, -0.000000000000000, 0.000000000000000,
0.000000000000000, -0.000000000000000, 0.000000000000000,
0.000000000000000, 0.000000000000000, 0.000000000000000,
0.000000000000000, -1.470529670141027, 0.000000000000000,
-0.000000000000000, 0.000000000000000, -0.000000000000000,
-0.000000000000000, -0.000000000000000, 0.000000000000000,
0.000000000000000, -0.000000000000000, 0.000000000000000,
-0.000000000000000, -0.000000000000000, -0.000000000000000,
0.000000000000000, 0.000000000000000, 0.000000000000000,
0.000000000000000, -0.000000000000000, -0.000000000000000,
-0.000000000000000, -0.000000000000000, 0.000000000000000,
0.000000000000000, -0.000000000000000, 0.000000000000000,
-0.000000000000000, -0.000000000000000, -0.000000000000000,
0.000000000000000, 0.000000000000000, 0.000000000000000,
0.000000000000000, -0.000000000000000, -0.000000000000000,
0.000000000000000, -0.000000000000000, -0.000000000000000,
-0.000000000000000, -0.000000000000000, -0.000000000000000,
0.000000000000000, -0.000000000000000, -0.000000000000000,
0.000000000000000, -0.000000000000000, -0.000000000000000,
-1.550813553968652, -0.000000000000000, -0.000000000000000,
-0.000000000000000, -0.000000000000000, -0.000000000000000,
5.005853918856763, 5.533827688719343, 3.196259515963121,
4.243412219223047, 2.092575251401420, 3.941843572458171,
5.833663273143095, 4.743063673607465, 5.411474567778996,
4.007322876479106], dtype=torch.float64, grad_fn=<MulBackward0>)
if stabadjustment_condition:
    muhat = muhat * m_sdev
muhat
tensor([-0.000000000000000, -0.000000000000000, 0.000000000000000,
0.000000000000000, -0.000000000000000, 0.000000000000000,
-0.000000000000000, 0.000000000000000, 0.000000000000000,
-0.000000000000000, -0.000000000000000, -0.000000000000000,
0.000000000000000, -0.000000000000000, -0.000000000000000,
0.000000000000000, -0.000000000000000, 0.000000000000000,
0.000000000000000, -0.000000000000000, -0.000000000000000,
-0.000000000000000, -0.000000000000000, 0.000000000000000,
-0.000000000000000, -0.000000000000000, -0.000000000000000,
0.000000000000000, -0.000000000000000, 0.000000000000000,
0.000000000000000, -0.000000000000000, 0.000000000000000,
0.000000000000000, 0.000000000000000, 0.000000000000000,
0.000000000000000, -1.470529670141027, 0.000000000000000,
-0.000000000000000, 0.000000000000000, -0.000000000000000,
-0.000000000000000, -0.000000000000000, 0.000000000000000,
0.000000000000000, -0.000000000000000, 0.000000000000000,
-0.000000000000000, -0.000000000000000, -0.000000000000000,
0.000000000000000, 0.000000000000000, 0.000000000000000,
0.000000000000000, -0.000000000000000, -0.000000000000000,
-0.000000000000000, -0.000000000000000, 0.000000000000000,
0.000000000000000, -0.000000000000000, 0.000000000000000,
-0.000000000000000, -0.000000000000000, -0.000000000000000,
0.000000000000000, 0.000000000000000, 0.000000000000000,
0.000000000000000, -0.000000000000000, -0.000000000000000,
0.000000000000000, -0.000000000000000, -0.000000000000000,
-0.000000000000000, -0.000000000000000, -0.000000000000000,
0.000000000000000, -0.000000000000000, -0.000000000000000,
0.000000000000000, -0.000000000000000, -0.000000000000000,
-1.550813553968652, -0.000000000000000, -0.000000000000000,
-0.000000000000000, -0.000000000000000, -0.000000000000000,
5.005853918856763, 5.533827688719343, 3.196259515963121,
4.243412219223047, 2.092575251401420, 3.941843572458171,
5.833663273143095, 4.743063673607465, 5.411474567778996,
4.007322876479106], dtype=torch.float64, grad_fn=<MulBackward0>)
if not verbose:
    muhat
else:
    retlist = {
        'muhat': muhat,
        'x': x,
        'threshold.sdevscale': tt,
        'threshold.origscale': tcor,
        'prior': prior,
        'w': w,
        'a': a,
        'bayesfac': bayesfac,
        'sdev': sdev,
        'threshrule': threshrule
    }
if pr == "c":
    del retlist['a']
if threshrule == "none":
    del retlist['muhat']
verbose=True
retlist
NameError: name 'retlist' is not defined
R코드
ebayesthresh <- function (x, prior = "laplace", a = 0.5, bayesfac = FALSE,
                          sdev = NA, verbose = FALSE, threshrule = "median",
                          universalthresh = TRUE, stabadjustment) {
#
# Given a vector of data x, find the marginal maximum likelihood
# estimator of the mixing weight w, and apply an appropriate
# thresholding rule using this weight.
#
# If the prior is laplace and a=NA, then the inverse scale parameter
# is also found by MML.
#
# Standard deviation sdev can be a vector (heterogeneous variance) or
# a single value (homogeneous variance). If sdev=NA, then it is
# estimated using the function mad(x). Heterogeneous variance is
# allowed only for laplace prior currently.
#
# The thresholding rules allowed are "median", "mean", "hard", "soft"
# and "none"; if "none" is used, then only the parameters are worked
# out.
#
# If hard or soft thresholding is used, the argument "bayesfac"
# specifies whether to use the bayes factor threshold or the
# posterior median threshold.
#
# If universalthresh=TRUE, the thresholds will be upper bounded by
# universal threshold adjusted by standard deviation; otherwise,
# weight w will be searched in [0, 1].
#
# If stabadjustment=TRUE, the observations and standard deviations
# will be first divided by the mean of all given standard deviations
# in case of inefficiency due to large value of standard
# deviation. In the case of homogeneous variance, the standard
# deviations will be normalized to 1 automatically.
#
# If verbose=TRUE then the routine returns a list with several
# arguments, including muhat which is the result of the
# thresholding. If verbose=FALSE then only muhat is returned.
#
# Find the standard deviation if necessary and estimate the parameters
pr <- substring(prior, 1, 1)

if(length(sdev) == 1) {
  if(!missing(stabadjustment))
    stop(paste("Argument stabadjustment is not applicable when",
               "variances are homogeneous."))
  if(is.na(sdev)) {
    sdev <- mad(x, center = 0)
  }
  stabadjustment_condition = TRUE
} else{
  if(pr == "c")
    stop("Standard deviation has to be homogeneous for Cauchy prior.")
  if(length(sdev) != length(x))
    stop(paste("Standard deviation has to be homogeneous or has the",
               "same length as observations."))
  if(missing(stabadjustment))
    stabadjustment <- FALSE
  stabadjustment_condition = stabadjustment
}

if (stabadjustment_condition) {
  m_sdev <- mean(sdev)
  s <- sdev/m_sdev
  x <- x/m_sdev
} else { s <- sdev }

if ((pr == "l") & is.na(a)) {
  pp <- wandafromx(x, s, universalthresh)
  w <- pp$w
  a <- pp$a
} else
  w <- wfromx(x, s, prior = prior, a = a, universalthresh)
if(pr != "m" | verbose) {
  tt <- tfromw(w, s, prior = prior, bayesfac = bayesfac, a = a)
  if(stabadjustment_condition) {
    tcor <- tt * m_sdev
  } else {
    tcor <- tt
  }
}
if(threshrule == "median")
  muhat <- postmed(x, s, w, prior = prior, a = a)
if(threshrule == "mean")
  muhat <- postmean(x, s, w, prior = prior, a = a)
if(threshrule == "hard")
  muhat <- threshld(x, tt)
if(threshrule == "soft")
  muhat <- threshld(x, tt, hard = FALSE)
if(threshrule == "none")
  muhat <- NA

# Now return desired output
if(stabadjustment_condition) {
  muhat <- muhat * m_sdev
}
if(!verbose)
  return(muhat)
retlist <- list(muhat = muhat, x = x, threshold.sdevscale = tt,
                threshold.origscale = tcor, prior = prior, w = w,
                a = a, bayesfac = bayesfac, sdev = sdev,
                threshrule = threshrule)
if(pr == "c")
  retlist <- retlist[-7]
if(threshrule == "none")
  retlist <- retlist[-1]
return(retlist)
}
결과
- Python
ebayesthresh_torch.ebayesthresh(x=torch.normal(torch.tensor([0] * 90 + [5] * 10, dtype=torch.float32), torch.ones(100, dtype=torch.float32)), sdev = 1)
tensor([-0.000000000000000, 0.000000000000000, -0.000000000000000,
-0.000000000000000, 0.000000000000000, -0.000000000000000,
-0.000000000000000, 0.000000000000000, -0.000000000000000,
-0.000000000000000, -0.000000000000000, -0.000000000000000,
0.000000000000000, 0.000000000000000, 0.000000000000000,
0.000000000000000, -0.000000000000000, 0.000000000000000,
-0.164418213594979, -0.000000000000000, -0.000000000000000,
-0.000000000000000, 0.000000000000000, -0.000000000000000,
-0.000000000000000, -0.000000000000000, -0.000000000000000,
0.000000000000000, -0.000000000000000, 0.000000000000000,
-0.000000000000000, 0.000000000000000, 0.000000000000000,
-0.000000000000000, -0.000000000000000, -0.000000000000000,
0.000000000000000, 0.000000000000000, -0.000000000000000,
0.000000000000000, 0.000000000000000, -0.000000000000000,
-0.000000000000000, -0.000000000000000, 0.000000000000000,
2.599826913097172, -0.000000000000000, -0.000000000000000,
0.000000000000000, -0.000000000000000, -0.000000000000000,
0.000000000000000, 0.000000000000000, -0.000000000000000,
0.000000000000000, 0.000000000000000, -0.000000000000000,
0.000000000000000, 0.000000000000000, -0.000000000000000,
-0.722819235077744, 0.000000000000000, 0.000000000000000,
-0.000000000000000, 0.000000000000000, -0.000000000000000,
-0.000000000000000, -0.000000000000000, 0.000000000000000,
-0.000000000000000, -0.000000000000000, 0.000000000000000,
0.000000000000000, 0.000000000000000, 0.000000000000000,
-0.000000000000000, 0.000000000000000, 0.000000000000000,
-0.000000000000000, 0.000000000000000, 0.000000000000000,
-1.492949053755082, 0.000000000000000, -0.000000000000000,
-0.000000000000000, -0.000000000000000, -0.000000000000000,
-0.000000000000000, 0.000000000000000, -0.000000000000000,
4.385957434819470, 6.336716643835314, 3.709441119024078,
3.223851702304409, 6.918331146069010, 4.836058140567935,
5.867223121041847, 4.259974169736554, 5.624895479149825,
6.157949899783112], dtype=torch.float64, grad_fn=<MulBackward0>)
- R
> ebayesthresh(x = rnorm(100, c(rep(0,90), rep(5,10))),
+ prior = "laplace", sdev = 1)
[1] 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000
[15] 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000
[29] 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000
[43] 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000
[57] 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000
[71] 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 -0.4480064
[85] 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 5.1534865 6.2732386 4.4612851 5.9931848 4.5828731 4.6154038 4.8247775 3.6219544
[99] 4.4480080 5.4084453
isotone
Isotonic Regression은 입력 변수에 따른 출력 변수의 단조 증가(monotonic increasing) 또는 감소(monotonic decreasing) 패턴을 찾는 방법
beta = ebayesthresh_torch.beta_cauchy(torch.tensor([-2,1,0,-4]))
w = torch.ones(len(beta))
aa = w + 1/beta
x = w + aa
wt = 1/aa**2
increasing = False
if wt is None:
    wt = torch.ones_like(x)
nn = len(x)
nn
4
ebayesthresh_torch.beta_cauchy(torch.tensor([-2,1,0,-4]))
tensor([ 0.597264024732663, -0.351278729299872, -0.500000000000000,
185.247374190108047], dtype=torch.float64)
if nn == 1:
    x = x
if not increasing:
    x = -x
ip = torch.arange(nn)
ip
tensor([0, 1, 2, 3])
dx = torch.diff(x)
dx
tensor([ 4.521043661450835, -0.846742249361595, -2.005398187177399],
dtype=torch.float64)
nx = len(x)
nx
4
while (nx > 1) and (torch.min(dx) < 0):
    jmax = torch.where((torch.cat([dx <= 0, torch.tensor([False])]) & torch.cat([torch.tensor([True]), dx > 0])))[0]
    jmin = torch.where((torch.cat([dx > 0, torch.tensor([True])]) & torch.cat([torch.tensor([False]), dx <= 0])))[0]

    for jb in range(len(jmax)):
        ind = torch.arange(jmax[jb], jmin[jb] + 1)
        wtn = torch.sum(wt[ind])
        x[jmax[jb]] = torch.sum(wt[ind] * x[ind]) / wtn
        wt[jmax[jb]] = wtn
        x[jmax[jb] + 1:jmin[jb] + 1] = torch.nan

    ind = ~torch.isnan(x)
    x = x[ind]
    wt = wt[ind]
    ip = ip[ind]
    dx = torch.diff(x)
    nx = len(x)
jj = torch.zeros(nn, dtype=torch.int32)
jj
tensor([0, 0, 0, 0], dtype=torch.int32)
jj[ip] = 1
jj
tensor([1, 1, 0, 0], dtype=torch.int32)
z = x[torch.cumsum(jj, dim=0) - 1]
z
tensor([-3.674301412089240, -0.760411047043364, -0.760411047043364,
-0.760411047043364], dtype=torch.float64)
if not increasing:
    z = -z
z
tensor([3.674301412089240, 0.760411047043364, 0.760411047043364,
0.760411047043364], dtype=torch.float64)
R코드
isotone <- function(x, wt = rep(1, length(x)), increasing = FALSE) {
#
# find the weighted least squares isotone fit to the
# sequence x, the weights given by the sequence wt
#
# if increasing == TRUE the curve is set to be increasing,
# otherwise to be decreasing
#
# the vector ip contains the indices on the original scale of the
# breaks in the regression at each stage
#
nn <- length(x)
if(nn == 1)
  return(x)
if(!increasing)
  x <- - x
ip <- (1:nn)
dx <- diff(x)
nx <- length(x)
while((nx > 1) && (min(dx) < 0)) {
#
# do single pool-adjacent-violators step
#
# find all local minima and maxima
#
  jmax <- (1:nx)[c(dx <= 0, FALSE) & c(TRUE, dx > 0)]
  jmin <- (1:nx)[c(dx > 0, TRUE) & c(FALSE, dx <= 0)]
# do pav step for each pair of maxima and minima
#
# add up weights within subsequence that is pooled
# set first element of subsequence to the weighted average
# the first weight to the sum of the weights within the subsequence
# and remainder of the subsequence to NA
#
  for(jb in (1:length(jmax))) {
    ind <- (jmax[jb]:jmin[jb])
    wtn <- sum(wt[ind])
    x[jmax[jb]] <- sum(wt[ind] * x[ind])/wtn
    wt[jmax[jb]] <- wtn
    x[(jmax[jb] + 1):jmin[jb]] <- NA
  }
#
# clean up within iteration, eliminating the parts of sequences that
# were set to NA
#
  ind <- !is.na(x)
  x <- x[ind]
  wt <- wt[ind]
  ip <- ip[ind]
  dx <- diff(x)
  nx <- length(x)
}
#
# final cleanup: reconstruct z at all points by repeating the pooled
# values the appropriate number of times
#
jj <- rep(0, nn)
jj[ip] <- 1
z <- x[cumsum(jj)]
if(!increasing)
  z <- - z
return(z)
}
결과
- Python
beta = ebayesthresh_torch.beta_cauchy(torch.tensor([-2,1,0,-4]))
w = torch.ones(len(beta))
aa = w + 1/beta
ps = w + aa
ww = 1/aa**2
wnew = ebayesthresh_torch.isotone(ps, ww, increasing = False)
wnew
[3.67430141208924, 0.760411047043364, 0.760411047043364, 0.760411047043364]
R
> beta <- beta.cauchy(c(-2,1,0,-4))
> w <- rep(1, length(x))
> aa = w + 1/beta
> ps = w + aa
> ww = 1/aa**2
> wnew = isotone(ps, ww, increasing = FALSE)
> wnew
[1] 3.674301 0.760411 0.760411 0.760411
laplace_threshzero
x = torch.tensor([-2,1,0,-4,8,50])
s = 1
w = 0.5
a = 0.5
a = min(a, 20)
a
0.5
xma = x / s - s * a
xma
tensor([-2.500000000000000, 0.500000000000000, -0.500000000000000,
-4.500000000000000, 7.500000000000000, 49.500000000000000])
xma_np = np.array(xma.detach())
z = torch.tensor(norm.cdf(xma_np, loc=0, scale=1)) - (1 / a) * (1 / s * torch.tensor(norm.pdf(xma_np, loc=0, scale=1))) * (1 / w + ebayesthresh_torch.beta_laplace(x, s, a))
z
tensor([-0.095098724189572, -0.449199823501264, -0.704130653528599,
-0.009185957714915, 0.499999999999483, 1.000000000000000],
dtype=torch.float64)
R코드
laplace.threshzero <- function(x, s = 1, w = 0.5, a = 0.5) {
#
# The function that has to be zeroed to find the threshold with the
# Laplace prior. Only allow a < 20 for input value.
#
a <- min(a, 20)
xma <- x/s - s*a
z <- pnorm(xma) - 1/a * (1/s*dnorm(xma)) * (1/w + beta.laplace(x, s, a))
return(z)
}
결과
- Python
ebayesthresh_torch.laplace_threshzero(torch.tensor([-2,1,0,-4,8,50]), s = 1, w = 0.5, a = 0.5)
tensor([-0.095098724189572, -0.449199823501264, -0.704130653528599,
-0.009185957714915, 0.499999999999483, 1.000000000000000],
dtype=torch.float64)
ebayesthresh_torch.laplace_threshzero(torch.tensor(-5), s = 1, w = 0.5, a = 0.5)
tensor(-0.003369167953292, dtype=torch.float64)
- R
> laplace.threshzero(c(-2,1,0,-4,8,50), s = 1, w = 0.5, a = 0.5)
[1] -0.095098724 -0.449199824 -0.704130654 -0.009185958 0.500000000 1.000000000
> laplace.threshzero(-5, s = 1, w = 0.5, a = 0.5)
[1] -0.003369168
negloglik_laplace
Marginal negative log likelihood function for laplace prior.
- 라플라스 프라이어에 대한 한계음의로그우도함수 계산
xpar = torch.tensor([0.5,0.6,0.3])
xx = torch.tensor([1,2,3,4,5])
ss = torch.tensor([1])
tlo = torch.sqrt(2 * torch.log(torch.tensor(len([1, 2, 3, 4, 5])).float())) * 1
thi = torch.tensor([0.0,0.0,0.0])
a = xpar[1]
a
tensor(0.600000023841858)
wlo = ebayesthresh_torch.wfromt(thi, ss, a=a)
wlo
tensor([1., 1., 1.], dtype=torch.float64)
whi = ebayesthresh_torch.wfromt(tlo, ss, a=a)
whi
tensor([0.445282361582141], dtype=torch.float64)
wlo = torch.max(wlo)
wlo
tensor(1., dtype=torch.float64)
whi = torch.min(whi)
whi
tensor(0.445282361582141, dtype=torch.float64)
loglik = torch.sum(torch.log(1 + (xpar[0] * (whi - wlo) + wlo) *
ebayesthresh_torch.beta_laplace(xx, ss, a)))
-loglik
tensor(-16.797274811699509, dtype=torch.float64)
R코드
negloglik.laplace <- function(xpar, xx, ss, tlo, thi) {
#
# Marginal negative log likelihood function for laplace prior.
# Constraints for thresholds need to be passed externally.
#
# xx :data
# xpar :vector of two parameters:
# xpar[1] : a value between [0, 1] which will be adjusted to range of w
# xpar[2] : inverse scale (rate) parameter ("a")
# ss :vector of standard deviations
# tlo :lower bound of thresholds
# thi :upper bound of thresholds
#
a <- xpar[2]

# Calculate the range of w given a, using negative monotonicity
# between w and t
wlo <- wfromt(thi, ss, a = a)
whi <- wfromt(tlo, ss, a = a)
wlo <- max(wlo)
whi <- min(whi)
loglik <- sum(log(1 + (xpar[1] * (whi - wlo) + wlo) *
beta.laplace(xx, ss, a)))
return(-loglik)
}
결과
- Python
xpar = torch.tensor([0.5,0.6,0.3])
xx = torch.tensor([1,2,3,4,5])
ss = torch.tensor([1])
tlo = torch.sqrt(2 * torch.log(torch.tensor(len([1, 2, 3, 4, 5])).float())) * 1
thi = torch.tensor([0.0,0.0,0.0])
ebayesthresh_torch.negloglik_laplace(xpar, xx, ss, tlo, thi)
tensor(-16.797274811699509, dtype=torch.float64)
- R
> xpar <- c(0.5, 0.6, 0.3)
> xx <- c(1, 2, 3, 4, 5)
> ss <- c(1)
> tlo <- sqrt(2 * log(length(c(1, 2, 3, 4, 5)))) * 1
> thi <- c(0, 0, 0)
> negloglik.laplace(xpar, xx, ss, tlo, thi)
[1] -16.79727
postmean
Given a single value or a vector of data and sampling standard deviations (sd equals 1 for Cauchy prior), find the corresponding posterior mean estimate(s) of the underlying signal value(s).
- 적절한 사후 평균 찾기
x = torch.tensor([-2.0,1.0,0.0,-4.0,8.0,50.0])
s = torch.tensor([1.0])
w = torch.tensor([0.5])
# prior = "cauchy"
prior = "laplace"
a = 0.5
pr = prior[0:1]
pr
'l'
if pr == "l":
    mutilde = ebayesthresh_torch.postmean_laplace(x, s, w, a=a)
elif pr == "c":
    if torch.any(s != 1):
        raise ValueError("Only standard deviation of 1 is allowed for Cauchy prior.")
    mutilde = ebayesthresh_torch.postmean_cauchy(x, w)
else:
    raise ValueError("Unknown prior type.")
mutilde
tensor([-1.011589622421743, 0.270953303334685, 0.000000000000000,
-3.488009240410718, 7.499999999992725, 49.500000000000000],
dtype=torch.float64)
R코드
postmean <- function(x, s = 1, w = 0.5, prior = "laplace", a = 0.5) {
#
# Find the posterior mean for the appropriate prior for
# given x, s (sd), w and a.
#
pr <- substring(prior, 1, 1)
if(pr == "l")
  mutilde <- postmean.laplace(x, s, w, a = a)
if(pr == "c"){
  if(any(s != 1))
    stop(paste("Only standard deviation of 1 is allowed",
               "for Cauchy prior."))
  mutilde <- postmean.cauchy(x, w)
}
return(mutilde)
}
결과
- Python
ebayesthresh_torch.postmean(torch.tensor([-2.0,1.0,0.0,-4.0,8.0,50.0]), s=1, w = 0.5, prior = "laplace", a = 0.5)
tensor([-1.011589622421743, 0.270953303334685, 0.000000000000000,
-3.488009240410718, 7.499999999992725, 49.500000000000000],
dtype=torch.float64)
- R
> postmean(c(-2,1,0,-4,8,50), s=1, w = 0.5, prior = "laplace", a = 0.5)
[1] -1.0115896 0.2709533 0.0000000 -3.4880092 7.5000000 49.5000000
postmean_cauchy
Find the posterior mean for the quasi-Cauchy prior with mixing weight w given data x, which may be a scalar or a vector.
- quasi-Cauchy에 대한 사후 평균 구하기
=torch.tensor([-2.0,1.0,0.0,-4.0,8.0,50.0], dtype=float)
x = 0.5 w
= torch.nonzero(x == 0)
ind ind
tensor([[2]])
= x[x != 0]
x x
tensor([-2., 1., -4., 8., 50.], dtype=torch.float64)
= torch.exp(-x**2/2)
ex ex
tensor([1.353352832366127e-01, 6.065306597126334e-01, 3.354626279025119e-04,
1.266416554909418e-14, 0.000000000000000e+00], dtype=torch.float64)
= w * (x - (2 * (1 - ex))/x)
z z
tensor([-0.567667641618306, 0.106530659712633, -1.750083865656976,
3.875000000000002, 24.980000000000000], dtype=torch.float64)
= z / (w * (1 - ex) + (1 - w) * ex * x**2)
z z
tensor([-0.807489729485063, 0.213061319425267, -3.482643281306042,
7.749999999993821, 49.960000000000001], dtype=torch.float64)
= z
muhat muhat
tensor([-0.807489729485063, 0.213061319425267, -3.482643281306042,
7.749999999993821, 49.960000000000001], dtype=torch.float64)
= torch.tensor([0.0], dtype=float) muhat[ind]
muhat
tensor([-0.807489729485063, 0.213061319425267, 0.000000000000000,
7.749999999993821, 49.960000000000001], dtype=torch.float64)
R코드
<- function(x, w) {
postmean.cauchy #
# Find the posterior mean for the quasi-Cauchy prior with mixing
# weight w given data x, which may be a scalar or a vector.
#
<- x
muhat <- (x == 0)
ind <- x[!ind]
x <- exp( - x^2/2)
ex <- w * (x - (2 * (1 - ex))/x)
z <- z/(w * (1 - ex) + (1 - w) * ex * x^2)
z !ind] <- z
muhat[return(muhat)
}
결과
- Python
-2,1,0,-4,8,50]),0.5) ebayesthresh_torch.postmean_cauchy(torch.tensor([
tensor([-0.807489693164825, 0.213061332702637, 0.000000000000000,
7.750000000000000, 49.959999084472656])
- R
> postmean.cauchy(c(-2,1,0,-4,8,50),0.5)
1] -0.8074897 0.2130613 0.0000000 -3.4826433 7.7500000 49.9600000 [
postmean.laplace
Find the posterior mean for the double exponential prior for given \(x, s (sd), w\), and \(a\).
- 이중 지수 분포(double exponential prior)에 대한 사후 평균
= torch.tensor([-2,1,0,-4,8,50])
x = 1
s = 0.5
w = 0.5 a
= min(a, 20)
a a
0.5
= ebayesthresh_torch.wpost_laplace(w, x, s, a)
w_post w_post
tensor([0.653961521302972, 0.382700153299694, 0.304677821507423,
0.996521248676984, 0.999999999999026, 1.000000000000000],
dtype=torch.float64)
= torch.sign(x)
sx sx
tensor([-1, 1, 0, -1, 1, 1])
= torch.abs(x)
x x
tensor([ 2, 1, 0, 4, 8, 50])
= x / s + s * a
xpa xpa
tensor([ 2.500000000000000, 1.500000000000000, 0.500000000000000,
4.500000000000000, 8.500000000000000, 50.500000000000000])
= x / s - s * a
xma xma
tensor([ 1.500000000000000, 0.500000000000000, -0.500000000000000,
3.500000000000000, 7.500000000000000, 49.500000000000000])
= torch.minimum(xpa, torch.tensor(35.0))
xpa xpa
tensor([ 2.500000000000000, 1.500000000000000, 0.500000000000000,
4.500000000000000, 8.500000000000000, 35.000000000000000])
= torch.maximum(xma, torch.tensor(-35.0))
xma xma
tensor([ 1.500000000000000, 0.500000000000000, -0.500000000000000,
3.500000000000000, 7.500000000000000, 49.500000000000000])
= torch.tensor(norm.cdf(xma, loc=0, scale=1))
cp1 cp1
tensor([0.933192798731142, 0.691462461274013, 0.308537538725987,
0.999767370920964, 0.999999999999968, 1.000000000000000],
dtype=torch.float64)
= torch.tensor(norm.cdf(-xpa, loc=0, scale=1))
cp2 cp2
tensor([ 6.209665325776132e-03, 6.680720126885807e-02, 3.085375387259869e-01,
3.397673124730053e-06, 9.479534822203250e-18, 1.124910706472406e-268],
dtype=torch.float64)
= torch.exp(torch.minimum(2 * a * x, torch.tensor(100.0, dtype=torch.float32)))
ef ef
tensor([7.389056205749512e+00, 2.718281745910645e+00, 1.000000000000000e+00,
5.459814834594727e+01, 2.980958007812500e+03, 5.184705457665547e+21])
= x - a * s**2 * (2 * cp1 / (cp1 + ef * cp2) - 1)
postmean_cond postmean_cond
tensor([ 1.546864134156123, 0.708004167227234, 0.000000000000000,
3.500185515403230, 7.500000000000028, 49.500000000000000],
dtype=torch.float64)
* w_post * postmean_cond sx
tensor([-1.011589622421743, 0.270953303334685, 0.000000000000000,
-3.488009240410718, 7.499999999992725, 49.500000000000000],
dtype=torch.float64)
R코드
<- function(x, s = 1, w = 0.5, a = 0.5) {
postmean.laplace #
# Find the posterior mean for the double exponential prior for
# given x, s (sd), w and a.
#
# Only allow a < 20 for input value.
<- min(a, 20)
a
# First find the probability of being non-zero
<- wpost.laplace(w, x, s, a)
wpost
# Now find the posterior mean conditional on being non-zero
<- sign(x)
sx <- abs(x)
x <- x/s + s*a
xpa <- x/s - s*a
xma > 35] <- 35
xpa[xpa < -35] <- -35
xma[xma
<- pnorm(xma)
cp1 <- pnorm( - xpa)
cp2 <- exp(pmin(2 * a * x, 100))
ef <- x - a * s^2 * ( 2 * cp1/(cp1 + ef * cp2) - 1)
postmeancond
# Calculate posterior mean and return
return(sx * wpost * postmeancond)
}
결과
- Python
-2.0,1.0,0.0,-4.0,8.0,50.0])) ebayesthresh_torch.postmean_laplace(torch.tensor([
tensor([-1.011589622421743, 0.270953303334685, 0.000000000000000,
-3.488009240410718, 7.499999999992725, 49.500000000000000],
dtype=torch.float64)
- R
> postmean.laplace(c(-2,1,0,-4,8,50))
1] -1.0115896 0.2709533 0.0000000 -3.4880092 7.5000000 49.5000000 [
postmed
Description
Given a single value or a vector of data and sampling standard deviations (sd is 1 for Cauchy prior), find the corresponding posterior median estimate(s) of the underlying signal value(s).
사후 확률 중앙값 추정치 구하기
= torch.tensor([1.5, 2.5, 3.5])
x = 1
s = 0.5
w = "laplace"
prior = 0.5 a
= prior[0:1]
pr pr
'l'
if pr == "l":
= ebayesthresh_torch.postmed_laplace(x, s, w, a)
muhat elif pr == "c":
if np.any(s != 1):
raise ValueError("Only standard deviation of 1 is allowed for Cauchy prior.")
= ebayesthresh_torch.postmed_cauchy(x, w)
muhat else:
raise ValueError(f"Unknown prior: {prior}")
muhat
tensor([0.000000000000000, 1.734132351356471, 2.978157631290933],
dtype=torch.float64)
R코드
<- function (x, s = 1, w = 0.5, prior = "laplace", a = 0.5) {
postmed #
# Find the posterior median for the appropriate prior for
# given x, s (sd), w and a.
#
<- substring(prior, 1, 1)
pr if(pr == "l")
<- postmed.laplace(x, s, w, a)
muhat if(pr == "c") {
if(any(s != 1))
stop(paste("Only standard deviation of 1 is allowed",
"for Cauchy prior."))
<- postmed.cauchy(x, w)
muhat
}return(muhat)
}
결과
- Python
= torch.tensor([1.5, 2.5, 3.5])) ebayesthresh_torch.postmed(x
tensor([0.000000000000000, 1.734132351356471, 2.978157631290933],
dtype=torch.float64)
- R
postmed(x=c(1.5, 2.5, 3.5))
1] 0.000000 1.734132 2.978158 [
postmed_cauchy
= torch.tensor([10, 15, 20, 25])
x = 0.5 w
= len(x)
nx nx
4
= torch.full((nx,), float('nan'))
zest zest
tensor([nan, nan, nan, nan])
= torch.full((nx,), w)
w w
tensor([0.500000000000000, 0.500000000000000, 0.500000000000000,
0.500000000000000])
= torch.abs(x)
ax ax
tensor([10, 15, 20, 25])
= ax < 20
j j
tensor([ True, True, False, False])
~j] = ax[~j] - 2 / ax[~j]
zest[ zest
tensor([ nan, nan, 19.899999618530273,
24.920000076293945])
sum(j)) torch.zeros(torch.
tensor([0., 0.])
sum(j)).shape[0] torch.zeros(torch.
2
if torch.sum(j) > 0:
= ebayesthresh_torch.vecbinsolv(zf=torch.zeros(torch.sum(j)),
zest[j] =ebayesthresh_torch.cauchy_medzero,
fun=0, thi=torch.max(ax[j]), z=ax[j], w=w[j]) tlo
< 1e-7] = 0
zest[zest zest
tensor([ 9.800643920898438, 14.866861343383789, 19.899999618530273,
24.920000076293945])
= torch.sign(x) * zest
zest zest
tensor([ 9.800643920898438, 14.866861343383789, 19.899999618530273,
24.920000076293945])
R코드
<- function(x, w) {
postmed.cauchy #
# find the posterior median of the Cauchy prior with mixing weight w,
# pointwise for each of the data points x
#
<- length(x)
nx <- rep(NA, length(x))
zest <- rep(w, length.out = nx)
w <- abs(x)
ax <- (ax < 20)
j !j] <- ax[!j] - 2/ax[!j]
zest[if(sum(j) > 0) {
<- vecbinsolv(zf = rep(0, sum(j)), fun = cauchy.medzero,
zest[j] tlo = 0, thi = max(ax[j]), z = ax[j],
w = w[j])
}< 1e-007] <- 0
zest[zest <- sign(x) * zest
zest return(zest)
결과
- Python
=torch.tensor([10.0, 15.0, 20.0, 25.0]), w=0.5) ebayesthresh_torch.postmed_cauchy(x
tensor([ 9.800643920898438, 14.866861343383789, 19.899999618530273,
24.920000076293945])
- R
> postmed.cauchy(x=c(10, 15, 20, 25),w=0.5)
1] 9.800643 14.866861 19.900000 24.920000 [
postmed_laplace
= torch.tensor([1.5, 2.5, 3.5])
x = 1
s = 0.5
w = 0.5 a
= min(a, 20)
a a
0.5
= torch.sign(x)
sx sx
tensor([1., 1., 1.])
= torch.abs(x)
x x
tensor([1.500000000000000, 2.500000000000000, 3.500000000000000])
= x / s - s * a
xma xma
tensor([1., 2., 3.])
= np.array(xma.detach())
xma_np = 1 / a * (1 / s * torch.tensor(norm.pdf(xma_np, loc=0, scale=1))) * (1 / w + ebayesthresh_torch.beta_laplace(x, s, a))
zz zz
tensor([0.955593330923010, 0.604829429361236, 0.508713151551759],
dtype=torch.float64)
> 25] = 0.5
zz[xma zz
tensor([0.955593330923010, 0.604829429361236, 0.508713151551759],
dtype=torch.float64)
= np.array((torch.minimum(zz, torch.tensor(1))).detach())
tmp = torch.tensor(norm.ppf(tmp))
mucor mucor
tensor([1.701690841545193, 0.265867648643529, 0.021842368709067],
dtype=torch.float64)
= sx * torch.maximum(torch.tensor(0), xma - mucor) * s
muhat muhat
tensor([0.000000000000000, 1.734132351356471, 2.978157631290933],
dtype=torch.float64)
R코드
<- function(x, s = 1, w = 0.5, a = 0.5) {
postmed.laplace #
# Find the posterior median for the Laplace prior for
# given x (observations), s (sd), w and a.
#
# Only allow a < 20 for input value
<- min(a, 20)
a
# Work with the absolute value of x, and for x > 25 use the approximation
# to dnorm(x-a)*beta.laplace(x, a)
<- sign(x)
sx <- abs(x)
x <- x/s - s*a
xma <- 1/a * (1/s*dnorm(xma)) * (1/w + beta.laplace(x, s, a))
zz > 25] <- 1/2
zz[xma <- qnorm(pmin(zz, 1))
mucor <- sx * pmax(0, xma - mucor) * s
muhat return(muhat)
}
결과
- Python
= torch.tensor([1.5, 2.5, 3.5])) ebayesthresh_torch.postmed_laplace(x
tensor([0.000000000000000, 1.734132351356471, 2.978157631290933],
dtype=torch.float64)
- R
> postmed.laplace(x=c(1.5, 2.5, 3.5), s = 1, w = 0.5, a = 0.5)
1] 0.000000 1.734132 2.978158 [
threshld
임계값 t를 이용해서 데이터 조정
= torch.tensor(range(-5,5))
x =1.4
t=False hard
if hard:
= x * (torch.abs(x) >= t)
z else:
= torch.sign(x) * torch.maximum(torch.tensor(0.0), torch.abs(x) - t) z
z
tensor([-3.599999904632568, -2.599999904632568, -1.600000023841858,
-0.600000023841858, -0.000000000000000, 0.000000000000000,
0.000000000000000, 0.600000023841858, 1.600000023841858,
2.599999904632568])
R코드
<- function(x, t, hard = TRUE) {
threshld #
# threshold the data x using threshold t
# if hard=TRUE use hard thresholding
# if hard=FALSE use soft thresholding
if(hard) z <- x * (abs(x) >= t) else {
<- sign(x) * pmax(0, abs(x) - t)
z
}return(z)
}
결과
- Python
range(-5,5)), t=1.4, hard=False) ebayesthresh_torch.threshld(torch.tensor(
tensor([-3.599999904632568, -2.599999904632568, -1.600000023841858,
-0.600000023841858, -0.000000000000000, 0.000000000000000,
0.000000000000000, 0.600000023841858, 1.600000023841858,
2.599999904632568])
- R
> threshld(as.array(seq(-5, 5)), t=1.4, hard=FALSE)
1] -3.6 -2.6 -1.6 -0.6 0.0 0.0 0.0 0.6 1.6 2.6 3.6 [
wandafromx
Given a vector of data and a single value or vector of sampling standard deviations, find the marginal maximum likelihood choice of both weight and scale factor under the Laplace prior
= torch.tensor([-2,1,0,-4,8], dtype=float)
x = 1
s = True universalthresh
if universalthresh:
= torch.sqrt(2 * torch.log(torch.tensor(len(x)))) * s
thi else:
= torch.inf thi
thi
tensor(1.794122576713562)
if isinstance(s, int):
= torch.zeros(len(str(s)))
tlo else:
= torch.zeros(len(s)) tlo
tlo
tensor([0.])
= torch.tensor([0, 0.04])
lo lo
tensor([0.000000000000000, 0.039999999105930])
= torch.tensor([1, 3])
hi hi
tensor([1, 3])
= torch.tensor([0.5, 0.5])
startpar startpar
tensor([0.500000000000000, 0.500000000000000])
if 'optim' in globals():
= minimize(ebayesthresh_torch.negloglik_laplace, startpar, method='L-BFGS-B', bounds=[(lo[0], hi[0]), (lo[1], hi[1])], args=(x, s, tlo, thi))
result = result.x
uu else:
= minimize(ebayesthresh_torch.negloglik_laplace, startpar, bounds=[(lo[0], hi[0]), (lo[1], hi[1])], args=(x, s, tlo, thi))
result = result.x uu
= uu[1]
a a
0.30010822252477337
= ebayesthresh_torch.wfromt(thi, s, a=a)
wlo wlo
tensor(0.497624103823530, dtype=torch.float64)
= ebayesthresh_torch.wfromt(tlo, s, a=a)
whi whi
tensor([1.], dtype=torch.float64)
= torch.max(wlo)
wlo wlo
tensor(0.497624103823530, dtype=torch.float64)
= torch.min(whi)
whi whi
tensor(1., dtype=torch.float64)
= uu[0] * (whi - wlo) + wlo
w w
tensor(0.846681050730532, dtype=torch.float64)
R코드
<- function(x, s = 1, universalthresh = TRUE) {
wandafromx #
# Find the marginal max lik estimators of w and a given standard
# deviation s, using a bivariate optimization;
#
# If universalthresh=TRUE, the thresholds will be upper bounded by
# universal threshold adjusted by standard deviation. The threshold
# is constrained to lie between 0 and sqrt ( 2 log (n)) *
# s. Otherwise, threshold can take any nonnegative value;
#
# If running R, the routine optim is used; in S-PLUS the routine is
# nlminb.
#
# Range for thresholds
if(universalthresh) {
<- sqrt(2 * log(length(x))) * s
thi else{
} <- Inf
thi
}
<- rep(0, length(s))
tlo <- c(0,0.04)
lo <- c(1,3)
hi <- c(0.5,0.5)
startpar if (exists("optim")) {
<- optim(startpar, negloglik.laplace, method="L-BFGS-B",
uu lower = lo, upper = hi, xx = x, ss = s, thi = thi,
tlo = tlo)
<- uu$par
uu
}else {
<- nlminb(startpar, negloglik.laplace, lower = lo,
uu upper = hi, xx = x, ss = s, thi = thi, tlo = tlo)
<- uu$parameters
uu
}
<- uu[2]
a <- wfromt(thi, s, a = a)
wlo <- wfromt(tlo, s, a = a)
whi <- max(wlo)
wlo <- min(whi)
whi <- uu[1]*(whi - wlo) + wlo
w return(list(w=w, a=a))
}
결과
- Python
-2,1,0,-4,8,50], dtype=float)) ebayesthresh_torch.wandafromx(torch.tensor([
{'w': tensor(1., dtype=torch.float64), 'a': 0.41641347740360435}
-2,1,0,-4,8], dtype=float)) ebayesthresh_torch.wandafromx(torch.tensor([
{'w': tensor(0.846681050730532, dtype=torch.float64), 'a': 0.30010822252477337}
- R
> wandafromx(c(-2,1,0,-4,8,50))
$w
1] 1
[
$a
1] 0.4163946
[> wandafromx(c(-2,1,0,-4,8))
$w
1] 0.8466808
[
$a
1] 0.3001091 [
Mad(Median Absolute Deviation)
중앙값 절대 편차, 분산이나 퍼진 정도 확인 가능
결과
- Python
1, 2, 3, 3, 4, 4, 4, 5, 5.5, 6, 6, 6.5, 7, 7, 7.5, 8, 9, 12, 52, 90])) ebayesthresh_torch.mad(torch.tensor([
tensor(2.965199947357178)
- R
> mad(c(1, 2, 3, 3, 4, 4, 4, 5, 5.5, 6, 6, 6.5, 7, 7, 7.5, 8, 9, 12, 52, 90))
1] 2.9652 [
wfromt
Description
Given a value or vector of thresholds and sampling standard deviations (sd equals 1 for Cauchy prior), find the mixing weight for which this is(these are) the threshold(s) of the posterior median estimator. If a vector of threshold values is provided, the vector of corresponding weights is returned.
주어진 임계값과 표준편차에 대해, posterior median estimator에서 이 임계값이 나오도록 하는 혼합 가중치를 계산하는 함수가 제공된다.
# tt = np.array([2,3,5])
= torch.tensor(2.14, dtype=torch.float64)
tt = 1
s = 'laplace"'
prior = 0.5 a
= prior[0:1]
pr pr
'l'
if pr == "l":
= (tt / s - s * a).clone().detach().type(torch.float64)
tma = 1 / torch.abs(tma)
wi > -35] = torch.tensor(norm.cdf(tma[tma > -35], loc=0, scale=1)/norm.pdf(tma[tma > -35], loc=0, scale=1))
wi[tma = a * s * wi - ebayesthresh_torch.beta_laplace(tt, s, a) wi
if pr == "c":
= norm.pdf(tt, loc=0, scale=1)
dnz = 1 + (torch.tensor(norm.cdf(tt, loc=0, scale=1)) - tt * dnz - 1/2) / (torch.sqrt(torch.tensor(torch.pi/2)) * dnz * tt**2)
wi ~torch.isfinite(wi)] = 1 wi[
1 / wi
tensor(0.312639310177825, dtype=torch.float64)
- R코드
<- function(tt, s = 1, prior = "laplace", a = 0.5) {
wfromt #
# Find the weight that has posterior median threshold tt,
# given s (sd) and a.
#
<- substring(prior, 1, 1)
pr if(pr == "l"){
<- tt/s - s*a
tma <- 1/abs(tma)
wi > -35] <- pnorm(tma[tma > -35])/dnorm(tma[tma > -35])
wi[tma <- a * s * wi - beta.laplace(tt, s, a)
wi
}if(pr == "c") {
<- dnorm(tt)
dnz <- 1 + (pnorm(tt) - tt * dnz - 1/2)/
wi sqrt(pi/2) * dnz * tt^2)
(!is.finite(wi)] <- 1
wi[
}1/wi
}
결과
- Python
2,3,5]),prior='cachy') ebayesthresh_torch.wfromt(torch.tensor([
tensor([4.229634032914113e-01, 9.337993365820978e-02, 9.315908844505263e-05],
dtype=torch.float64)
2),prior='cachy') ebayesthresh_torch.wfromt(torch.tensor(
tensor(0.422963400115950, dtype=torch.float64)
2),prior='laplace') ebayesthresh_torch.wfromt(torch.tensor(
tensor(0.368633767549335, dtype=torch.float64)
- R
> wfromt(c(2,3,5),prior='cachy')
1] 4.229634e-01 9.337993e-02 9.315909e-05
[> wfromt(2,prior='cachy')
1] 0.4229634
[> wfromt(2,prior='laplace')
1] 0.3686338 [
wfromx
Description
The weight is found by marginal maximum likelihood. The search is over weights corresponding to threshold \(t_i\) in the range \([0, s_i \sqrt{2 log n}]\) if universalthresh=TRUE, where n is the length of the data vector and \((s_1, ..., s_n\)) (\(s_i\) is \(1\) for Cauchy prior) is the vector of sampling standard deviation of data (\(x_1, ..., x_n\)); otherwise, the search is over \([0, 1]\). The search is by binary search for a solution to the equation \(S(w) = 0\), where \(S\) is the derivative of the log likelihood. The binary search is on a logarithmic scale in \(w\). If the Laplace prior is used, the scale parameter is fixed at the value given for \(a\), and defaults to \(0.5\) if no value is provided. To estimate a as well as \(w\) by marginal maximum likelihood, use the routine wandafromx.
Suppose the vector \((x_1, \cdots, x_n)\) is such that \(x_i\) is drawn independently from a normal distribution with mean \(\theta_i\) and standard deviation \(s_i\) (\(s_i\) equals \(1\) for Cauchy prior). The prior distribution of the \(\theta_i\) is a mixture with probability \(1 − w\) of zero and probability \(w\) of a given symmetric heavy-tailed distribution. This routine finds the marginal maximum likelihood estimate of the parameter \(w\).
주어진 정규 분포 데이터에 대해 \(\theta_𝑖\)의 사전 분포가 주어진 상황에서, 모수 \(w\)의 최대우도 추정치를 계산하는 방법을 제공한다
= torch.tensor([0]*90 + [5]*10, dtype=torch.float)
s = torch.normal(mean=0, std=s)
x = "cauchy"
prior = 0.5
a = True universalthresh
= prior[0:1]
pr pr
'c'
if pr == "c":
= 1 s
if universalthresh:
= torch.sqrt(2 * torch.log(torch.tensor(len(x)))) * s
tuniv = ebayesthresh_torch.wfromt(tuniv, s, prior, a)
wlo = torch.max(wlo)
wlo else:
= 0 wlo
if pr == "l":
= ebayesthresh_torch.beta_laplace(x, s, a)
beta elif pr == "c":
= ebayesthresh_torch.beta_cauchy(x) beta
= 1
whi = torch.minimum(beta, torch.tensor(1e20))
beta
= torch.sum(beta / (1 + beta)) shi
if shi >= 0:
= 1 shi
= torch.sum(beta / (1 + wlo * beta)) slo
if slo <= 0:
= wlo slo
for _ in range(1,31):
= torch.sqrt(wlo * whi)
wmid = torch.sum(beta / (1 + wmid * beta))
smid if smid == 0:
= wmid
smid if smid > 0:
= wmid
wlo else:
= wmid whi
* whi) torch.sqrt(wlo
tensor(0.108260261783228, dtype=torch.float64)
- R코드
<- function (x, s = 1, prior = "laplace", a = 0.5,
wfromx universalthresh = TRUE) {
#
# Given the vector of data x and s (sd),
# find the value of w that zeroes S(w) in the
# range by successive bisection, carrying out nits harmonic bisections
# of the original interval between wlo and 1.
#
<- substring(prior, 1, 1)
pr if(pr == "c")
= 1
s if(universalthresh) {
<- sqrt(2 * log(length(x))) * s
tuniv <- wfromt(tuniv, s, prior, a)
wlo <- max(wlo)
wlo else
} = 0
wlo if(pr == "l")
<- beta.laplace(x, s, a)
beta if(pr == "c")
<- beta.cauchy(x)
beta <- 1
whi <- pmin(beta, 1e20)
beta <- sum(beta/(1 + beta))
shi if(shi >= 0)
return(1)
<- sum(beta/(1 + wlo * beta))
slo if(slo <= 0)
return(wlo)
for(j in (1:30)) {
<- sqrt(wlo * whi)
wmid <- sum(beta/(1 + wmid * beta))
smid if(smid == 0)
return(wmid)
if(smid > 0)
<- wmid
wlo else
<- wmid
whi
}return(sqrt(wlo * whi))
}
결과
- Python
= torch.normal(mean=0, std=torch.tensor([0]*90 + [5]*10, dtype=torch.float)), prior = "cauchy") ebayesthresh_torch.wfromx(x
tensor(0.086442935467523, dtype=torch.float64)
- R
> wfromx(x = rnorm(100, s = c(rep(0,90),rep(5,10))), prior = "cauchy")
1] 0.116067 [
wmonfromx
Given a vector of data, find the marginal maximum likelihood choice of weight sequence subject to the constraints that the weights are monotone decreasing
데이터에 대해 가중치 시퀀스를 선택하는 과정에서 조건이 주어지는데, 이 가중치 시퀀스는 각각의 가중치 값이 단조 감소해야 하며, 주어진 데이터에 대한 최대 우도를 갖도록 선택되어야 함.
= torch.randn(10)
xd = "laplace"
prior = 0.5
a = 1e-08
tol = 20 maxits
= prior[0:1]
pr pr
'l'
= len(xd)
nx nx
10
= ebayesthresh_torch.wfromt(torch.sqrt(2 * torch.log(torch.tensor(len(xd)))), prior=prior, a=a)
wmin wmin
tensor(0.310296798704717, dtype=torch.float64)
= torch.tensor(1)
winit winit
tensor(1)
if pr == "l":
= ebayesthresh_torch.beta_laplace(xd, a=a)
beta if pr == "c":
= ebayesthresh_torch.beta_cauchy(xd) beta
= winit.repeat_interleave(len(beta))
w w
tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
for j in range(maxits):
= w + 1 / beta
aa = w + aa
ps = 1 / aa ** 2
ww = torch.tensor(ebayesthresh_torch.isotone(ps, ww, increasing=False))
wnew = torch.maximum(wmin, wnew)
wnew = torch.minimum(torch.tensor(1), wnew)
wnew = torch.max(torch.abs(torch.diff(wnew)))
zinc = wnew
w # if zinc < tol:
# return w
# warnings.filterwarnings("More iterations required to achieve convergence")
R코드
<- function (xd, prior = "laplace", a = 0.5,
wmonfromx tol = 1e-08, maxits = 20) {
#
# Find the monotone marginal maximum likelihood estimate of the
# mixing weights for the Laplace prior with parameter a. It is
# assumed that the noise variance is equal to one.
#
# Find the beta values and the minimum weight
#
# Current version allows for standard deviation of 1 only.
#
<- substring(prior, 1, 1)
pr <- length(xd)
nx <- wfromt(sqrt(2 * log(length(xd))), prior=prior, a=a)
wmin <- 1
winit if(pr == "l")
<- beta.laplace(xd, a=a)
beta if(pr == "c")
<- beta.cauchy(xd)
beta
# now conduct iterated weighted least squares isotone regression
#
<- rep(winit, length(beta))
w for(j in (1:maxits)) {
<- w + 1/beta
aa <- w + aa
ps <- 1/aa^2
ww <- isotone(ps, ww, increasing = FALSE)
wnew <- pmax(wmin, wnew)
wnew <- pmin(1, wnew)
wnew <- max(abs(range(wnew - w)))
zinc <- wnew
w if(zinc < tol)
return(w)
}
warning("More iterations required to achieve convergence")
return(w)
}
결과
- Python
= torch.randn(10), prior = "laplace") ebayesthresh_torch.wmonfromx(xd
tensor([0.310296803712845, 0.310296803712845, 0.310296803712845,
0.310296803712845, 0.310296803712845, 0.310296803712845,
0.310296803712845, 0.310296803712845, 0.310296803712845,
0.310296803712845])
- R
> wmonfromx(xd <- rnorm(10,0,1), prior = "laplace")
1] 0.3102968 0.3102968 0.3102968 0.3102968 0.3102968 0.3102968 0.3102968 0.3102968 0.3102968 0.3102968 [
wmonfromx(xd=rnorm(5, s = 1), prior = "laplace", a = 0.5, tol = 1e-08, maxits = 20) [1] 0.9363989 0.9363989 0.9363989 0.4522184 0.4522184
vecbinsolv
= torch.tensor([1.0, 2.0, 3.0])
zf # zf = 0
def fun(t, *args, **kwargs):
= kwargs.get('c', 0)
c = t.clone().detach().type(torch.float64)
t return t**2 + c
= 0
tlo = 10
thi = 30 nits
if isinstance(zf, (int, float, str, bool)):
= len(str(zf))
nz else:
if zf.ndimension() == 0:
= len(str(zf.item()))
nz else:
= zf.shape[0] nz
= torch.full((nz,), tlo, dtype=torch.float32) tlo
tlo
tensor([0., 0., 0.])
= torch.full((nz,), thi, dtype=torch.float32) thi
thi
tensor([10., 10., 10.])
if tlo.numel() != nz:
raise ValueError("Lower constraint has to be homogeneous or have the same length as the number of functions.")
if thi.numel() != nz:
raise ValueError("Upper constraint has to be homogeneous or have the same length as the number of functions.")
=2
c
for _ in range(nits):
= (tlo + thi) / 2
tmid if fun == ebayesthresh_torch.cauchy_threshzero:
= fun(tmid, w=w)
fmid elif fun == ebayesthresh_torch.laplace_threshzero:
= fun(tmid, s=s,w=w,a=a)
fmid elif fun == ebayesthresh_torch.beta_cauchy:
= fun(tmid)
fmid elif fun == ebayesthresh_torch.beta_laplace:
= fun(tmid, z=z,w=w)
fmid else:
= fun(tmid)
fmid
= fmid <= zf
indt
= torch.where(indt, tmid, tlo)
tlo = torch.where(~indt, tmid, thi) thi
tmid
tensor([1.000000000000000, 1.414213657379150, 1.732050895690918])
fmid
tensor([1.000000000000000, 2.000000268717713, 3.000000305263711],
dtype=torch.float64)
indt
tensor([ True, False, False])
tlo
tensor([1.000000000000000, 1.414213538169861, 1.732050776481628])
thi
tensor([1.000000119209290, 1.414213657379150, 1.732050895690918])
= (tlo + thi) / 2
tsol tsol
tensor([1.000000000000000, 1.414213657379150, 1.732050895690918])
R코드
<- function(zf, fun, tlo, thi, nits = 30, ...) {
vecbinsolv #
# Given a monotone function fun, and a vector of values
# zf find a vector of numbers t such that f(t) = zf.
# The solution is constrained to lie on the interval (tlo, thi)
#
# The function fun may be a vector of increasing functions
#
# Present version is inefficient because separate calculations
# are done for each element of z, and because bisections are done even
# if the solution is outside the range supplied
#
# It is important that fun should work for vector arguments.
# Additional arguments to fun can be passed through ...
#
# Works by successive bisection, carrying out nits harmonic bisections
# of the interval between tlo and thi
#
<- length(zf)
nz if(length(tlo)==1) tlo <- rep(tlo, nz)
if(length(tlo)!=nz)
stop(paste("Lower constraint has to be homogeneous",
"or has the same length as #functions."))
if(length(thi)==1) thi <- rep(thi, nz)
if(length(thi)!=nz)
stop(paste("Upper constraint has to be homogeneous",
"or has the same length as #functions."))
# carry out nits bisections
#
for(jj in (1:nits)) {
<- (tlo + thi)/2
tmid <- fun(tmid, ...)
fmid <- (fmid <= zf)
indt <- tmid[indt]
tlo[indt] !indt] <- tmid[!indt]
thi[
}<- (tlo + thi)/2
tsol return(tsol)
}
결과
- Python
= torch.tensor([1.0, 2.0, 3.0])
zf def fun(t, *args, **kwargs):
= t.clone().detach().type(torch.float64)
t return 2*t
= 0
tlo = 10
thi ebayesthresh_torch.vecbinsolv(zf, fun, tlo, thi)
tensor([0.500000000000000, 1.000000000000000, 1.500000000000000])
def fun(t, *args, **kwargs):
= t.clone().detach().type(torch.float64)
t return t**2
ebayesthresh_torch.vecbinsolv(zf, fun, tlo, thi)
tensor([1.000000000000000, 1.414213657379150, 1.732050895690918])
- R
> zf <- c(1, 2, 3)
> fun <- function(x) 2*x
> tlo <- 0
> thi <- 10
> vecbinsolv(zf, fun, tlo, thi)
1] 0.5 1.0 1.5
[> fun <- function(x) x**2
> vecbinsolv(zf, fun, tlo, thi)
1] 1.000000 1.414214 1.732051 [
tfromw
Given a single value or a vector of weights (i.e. prior probabilities that the parameter is nonzero) and sampling standard deviations (sd equals 1 for Cauchy prior), find the corresponding threshold(s) under the specified prior.
주어진 가중치 벡터 w와 s(표준 편차)에 대해 지정된 사전 분포를 사용하여 임계값 또는 해당 가중치에 대한 임계값 벡터 찾기. 만약 bayesfac=True이면 베이즈 요인 임계값을 찾고, 그렇지 않으면 사후 중앙값 임계값을 찾음. 만약 Laplace 사전 분포를 사용하는 경우, a는 역 스케일(즉, rate) 매개변수의 값 나옴.
Parameters:
- w (array-like): 가중치 벡터
- s (float): 표준 편차(default: 1)
- prior (str): 사전 분포 (default: “laplace”)
- bayesfac (bool): 베이즈 요인 임계값을 찾는지 여부 (default: False)
- a (float): a < 20인 입력 값 (default: 0.5)
= torch.tensor([0.05, 0.1])
w # w = 0.5
= 1
s = "laplace"
prior # prior = "c"
= False
bayesfac = 0.5 a
= prior[0:1]
pr pr
'l'
if not isinstance(w, torch.Tensor):
= torch.tensor(w)
w if not isinstance(s, torch.Tensor):
= torch.tensor(s)
s if not isinstance(a, torch.Tensor):
= torch.tensor(a)
a
if bayesfac:
= 1 / w - 2
z if pr == "l":
if w.dim() == 0 or len(w) >= len(s):
= z
zz else:
= z.repeat(len(s))
zz = ebayesthresh_torch.vecbinsolv(zz, ebayesthresh_torch.beta_laplace, 0, 10, s=s, a=a)
tt elif pr == "c":
= ebayesthresh_torch.vecbinsolv(z, ebayesthresh_torch.beta_cauchy, 0, 10)
tt else:
= torch.tensor(0.0)
z if pr == "l":
= torch.zeros(max(s.numel(), w.numel()))
zz = s * (25 + s * a)
upper_bound = ebayesthresh_torch.vecbinsolv(zz, ebayesthresh_torch.laplace_threshzero, 0, upper_bound, s=s, w=w, a=a)
tt elif pr == "c":
= ebayesthresh_torch.vecbinsolv(z, ebayesthresh_torch.cauchy_threshzero, 0, 10, w=w) tt
# if bayesfac:
# z = 1 / w - 2
# if pr == "l":
# if isinstance(s, (int, float, str, bool)) and w.numel() >= len(str(s)):
# zz = z
# elif isinstance(s, (int, float, str, bool)) and w.numel() < len(str(s)):
# zz = torch.tensor([z] * len(str(s))) # numpy 배열을 torch 텐서로 변환
# elif len(w) >= len(s):
# zz = z
# elif len(w) < len(str(s)):
# zz = torch.tensor([z] * len(s)) # numpy 배열을 torch 텐서로 변환
# tt = ebayesthresh_torch.vecbinsolv(zz, ebayesthresh_torch.beta_laplace, 0, 10, 30, s=torch.tensor(s), w=torch.tensor(w), a=torch.tensor(a))
# elif pr == "c":
# tt = ebayesthresh_torch.vecbinsolv(z, ebayesthresh_torch.beta_cauchy, 0, 10, 30, w=torch.tensor(w))
# else:
# z = 0
# if pr == "l":
# if isinstance(s, (int, float, str, bool)) and not isinstance(w, (int, float, str, bool)):
# zz = torch.zeros(max(len(str(s)), w.numel()), dtype=torch.float) # numpy 배열을 torch 텐서로 변환
# elif not isinstance(s, (int, float, str, bool)) and isinstance(w, (int, float, str, bool)):
# zz = torch.zeros(max(len(s), len(str(w))), dtype=torch.float) # numpy 배열을 torch 텐서로 변환
# elif isinstance(s, (int, float, str, bool)) and isinstance(w, (int, float, str, bool)):
# zz = torch.zeros(max(len(str(s)), len(str(w))), dtype=torch.float) # numpy 배열을 torch 텐서로 변환
# else:
# zz = torch.tensor([0] * max(len(s), w.numel()), dtype=torch.float) # numpy 배열을 torch 텐서로 변환
# tt = ebayesthresh_torch.vecbinsolv(zz, ebayesthresh_torch.laplace_threshzero, 0, s * (25 + s * a), 30, s=torch.tensor(s), w=torch.tensor(w), a=torch.tensor(a))
# elif pr == "c":
# tt = ebayesthresh_torch.vecbinsolv(z, ebayesthresh_torch.cauchy_threshzero, 0, 10, 30, w=torch.tensor(w))
tt
tensor([3.115211963653564, 2.816306114196777])
R코드
<- function(w, s = 1, prior = "laplace", bayesfac = FALSE, a = 0.5) {
tfromw #
# Given the vector of weights w and s (sd), find the threshold or
# vector of thresholds corresponding to these weights, under the
# specified prior.
# If bayesfac=TRUE the Bayes factor thresholds are found, otherwise
# the posterior median thresholds are found.
# If the Laplace prior is used, a gives the value of the inverse scale
# (i.e., rate) parameter
#
<- substring(prior, 1, 1)
pr if(bayesfac) {
<- 1/w - 2
z if(pr == "l"){
if(length(w)>=length(s)) {
<- z
zz else { zz <- rep(z, length(s)) }
} <- vecbinsolv(zz, beta.laplace, 0, 10, s = s, a = a)
tt
}if(pr == "c")
<- vecbinsolv(z, beta.cauchy, 0, 10)
tt
}else {
<- 0
z if(pr == "l"){
<- rep(0, max(length(s), length(w)))
zz
# When x/s-s*a>25, laplace.threshzero has value
# close to 1/2; The boundary value of x can be
# treated as the upper bound for search.
<- vecbinsolv(zz, laplace.threshzero, 0, s*(25+s*a),
tt s = s, w = w, a = a)
}if(pr == "c")
<- vecbinsolv(z, cauchy.threshzero, 0, 10, w = w)
tt
}return(tt)
}
결과
- Python
0.05, 0.1]), s = 1) ebayesthresh_torch.tfromw(torch.tensor([
tensor([3.115211963653564, 2.816306114196777])
0.05, 0.1]), prior = "cauchy", bayesfac = True) ebayesthresh_torch.tfromw(torch.tensor([
tensor([3.259634971618652, 2.959740638732910])
- R
> tfromw(c(0.05, 0.1), s = 1)
1] 3.115212 2.816306
[> tfromw(c(0.05, 0.1), prior = "cauchy", bayesfac = TRUE)
1] 3.259635 2.959740 [
tfromx
Given a vector of data and standard deviations (sd equals 1 for Cauchy prior), find the value or vector (heterogeneous sampling standard deviation with Laplace prior) of thresholds corresponding to the marginal maximum likelihood choice of weight.
데이터가 주어졌을때, 가중치의 한계 최대 우도로 임계값 찾는 함수
=torch.tensor([0.05,0.1])
x=1
s="laplace"
prior=False
bayesfac=torch.tensor(0.5)
a=True universalthresh
= torch.cat((torch.randn(90), torch.randn(10) + 5), dim=0)
x = "cauchy"
prior =1
s=False
bayesfac=torch.tensor(0.5)
a=True universalthresh
= prior[0:1] pr
if pr == "c":
= 1 s
if pr == "l" and torch.isnan(a):
= ebayesthresh_torch.wandafromx(x, s, universalthresh)
wa = wa['w']
w = wa['a']
a else:
= ebayesthresh_torch.wfromx(x, s, prior=prior, a=a) w
=prior, bayesfac=bayesfac, a=a) ebayesthresh_torch.tfromw(w, s, prior
tensor([2.263044834136963, 2.263044834136963, 2.263044834136963])
R코드
- Python
=torch.tensor([0.05,0.1]), s = 1, prior = "laplace", bayesfac = False, a = 0.5, universalthresh = True) ebayesthresh_torch.tfromx(x
tensor(1.177409887313843)
= torch.cat((torch.randn(90), torch.randn(10) + 5), dim=0), prior = "cauchy") ebayesthresh_torch.tfromx(x
tensor(2.231245040893555)
- R
> tfromx(x=c(0.05,0.1), s = 1, prior = "laplace", bayesfac = FALSE, a = 0.5,universalthresh = TRUE)
1] 1.17741
[> tfromx(x=c(rnorm(90, mean=0, sd=1), rnorm(10, mean=5, sd=1)), prior = "cauchy")
1] 2.301196 [
wpost_laplace
Calculate the posterior weight for non-zero effect
- 0이 아닌 효과에 대한 사후 가중치 계산
= 0.5
w= torch.tensor([-2.0,1.0,0.0,-4.0,8.0,50.0])
x = 1
s = 0.5 a
= ebayesthresh_torch.beta_laplace(x, s, a)
laplace_beta laplace_beta
tensor([ 8.898520296511427e-01, -3.800417166060107e-01, -5.618177717731538e-01,
2.854594666723506e+02, 1.026980615772411e+12, 6.344539544172600e+265],
dtype=torch.float64)
1 - (1 - w) / (1 + w * laplace_beta)
tensor([0.653961521302972, 0.382700153299694, 0.304677821507423,
0.996521248676984, 0.999999999999026, 1.000000000000000],
dtype=torch.float64)
R코드
<- function(w, x, s = 1, a = 0.5)
wpost.laplace #
# Calculate the posterior weight for non-zero effect
#
1 - (1 - w)/(1 + w * beta.laplace(x, s, a))
결과
- Python
0.5,torch.tensor([-2,1,0,-4,8,50])) ebayesthresh_torch.wpost_laplace(
tensor([0.653961521302972, 0.382700153299694, 0.304677821507423,
0.996521248676984, 0.999999999999026, 1.000000000000000],
dtype=torch.float64)
- R
이베이즈 깃헙에 있는데 R 패키지에는 없음.
zetafromx
Description
Suppose a sequence of data has underlying mean vector with elements \(\theta_i\). Given the sequence of data, and a vector of scale factors cs and a lower limit pilo, this routine finds the marginal maximum likelihood estimate of the parameter zeta such that the prior probability of \(\theta_i\) being nonzero is of the form median(pilo, zeta*cs, 1).
파라메터 제타의 최대 우도 추정치 계산
= torch.tensor([-2.0,1.0,0.0,-4.0,8.0,50.0])
xd = torch.tensor([2.0,3.0,5.0,6.0,1.0,-1.0])
cs = None
pilo ="laplace"
prior# priir="c"
=torch.tensor(0.5) a
= prior[0:1]
pr pr
'l'
= len(xd)
nx nx
6
if pilo is None:
= ebayesthresh_torch.wfromt(torch.sqrt(2 * torch.log(torch.tensor(nx, dtype=torch.float))), prior=prior, a=a) pilo
pilo
tensor(0.412115022681705, dtype=torch.float64)
if pr == "l":
= ebayesthresh_torch.beta_laplace(xd, a=a)
beta elif pr == "c":
= ebayesthresh_torch.beta_cauchy(xd) beta
beta
tensor([ 8.898520296511427e-01, -3.800417166060107e-01, -5.618177717731538e-01,
2.854594666723506e+02, 1.026980615772411e+12, 6.344539544172600e+265],
dtype=torch.float64)
= pilo / cs
zs1 zs1
tensor([ 0.206057518720627, 0.137371674180031, 0.082423008978367,
0.068685837090015, 0.412115037441254, -0.412115037441254])
= 1 / cs
zs2 zs2
tensor([ 0.500000000000000, 0.333333343267441, 0.200000002980232,
0.166666671633720, 1.000000000000000, -1.000000000000000])
= torch.sort(torch.unique(torch.cat((zs1, zs2)))).values
zj zj
tensor([-1.000000000000000, -0.412115037441254, 0.068685837090015,
0.082423008978367, 0.137371674180031, 0.166666671633720,
0.200000002980232, 0.206057518720627, 0.333333343267441,
0.412115037441254, 0.500000000000000, 1.000000000000000])
= cs * beta
cb cb
tensor([ 1.779704059302285e+00, -1.140125149818032e+00,
-2.809088858865769e+00, 1.712756800034103e+03,
1.026980615772411e+12, -6.344539544172600e+265], dtype=torch.float64)
= len(zj)
mz mz
12
= None
zlmax zlmax
= torch.zeros(mz, dtype=torch.bool)
lmin lmin
tensor([False, False, False, False, False, False, False, False, False, False,
False, False])
for j in range(1, mz - 1):
= zj[j]
ze = cb[(ze > zs1) & (ze <= zs2)]
cbil = torch.sum(cbil / (1 + ze * cbil))
ld if ld <= 0:
= cb[(ze >= zs1) & (ze < zs2)]
cbir = torch.sum(cbir / (1 + ze * cbir))
rd = rd >= 0 lmin[j]
cbir
tensor([1.779704059302285], dtype=torch.float64)
rd
tensor(1.117038220864095, dtype=torch.float64)
lmin
tensor([False, True, True, False, False, False, False, False, True, False,
False, False])
= cb[zj[0] == zs1]
cbir cbir
tensor([], dtype=torch.float64)
= torch.sum(cbir / (1 + zj[0] * cbir))
rd rd
tensor(0., dtype=torch.float64)
if rd > 0:
0] = True
lmin[else:
= zj[0].tolist() zlmax
zlmax
-1.0
= cb[zj[mz - 1] == zs2]
cbil cbil
tensor([1.026980615772411e+12], dtype=torch.float64)
= cb[zj[mz - 1] == zs2]
cbil cbil
tensor([1.026980615772411e+12], dtype=torch.float64)
= torch.sum(cbil / (1 + zj[mz - 1] * cbil))
ld ld
tensor(0.999999999999026, dtype=torch.float64)
if ld < 0:
- 1] = True
lmin[mz else:
= [(zj[mz - 1]).tolist()] zlmax
= zj[lmin]
zlmin zlmin
tensor([-0.412115037441254, 0.068685837090015, 0.333333343267441])
= len(zlmin)
nlmin nlmin
3
for j in range(1, nlmin):
= zlmin[j - 1]
zlo = zlmin[j]
zhi = (zlo + zhi) / 2.0
ze = (zhi - zlo) / 2.0
zstep for nit in range(1,11):
= cb[(ze >= zs1) & (ze <= zs2)]
cbi = torch.sum(cbi / (1 + ze * cbi))
likd /= 2.0
zstep += zstep * torch.sign(likd)
ze zlmax.append(ze.item())
= torch.tensor(zlmax)
zlmax zlmax
tensor([ 1.000000000000000, -0.171714603900909, 0.155910953879356])
= torch.full((len(zlmax),), float('nan'))
zm zm
tensor([nan, nan, nan])
zlmax
tensor([ 1.000000000000000, -0.171714603900909, 0.155910953879356])
for j in range(len(zlmax)):
= torch.maximum(zs1, torch.min(zlmax[j],zs2))
pz = torch.sum(torch.log(1 + cb * pz)) zm[j]
pz
tensor([ 0.206057518720627, 0.155910953879356, 0.155910953879356,
0.155910953879356, 0.412115037441254, -0.412115037441254])
zm
tensor([643.794677734375000, 642.572204589843750, 643.049011230468750])
= zlmax[zm == torch.max(zm)]
zeta_candidates zeta_candidates
tensor([1.])
= torch.min(zeta_candidates)
zeta zeta
tensor(1.)
= torch.clamp(torch.min(torch.tensor([1.0], dtype=torch.float), torch.max(zeta * cs, pilo)), min=0.0, max=1.0)
w w
tensor([1.000000000000000, 1.000000000000000, 1.000000000000000,
1.000000000000000, 1.000000000000000, 0.412115037441254])
R 코드
<- function(xd, cs, pilo = NA, prior = "laplace", a = 0.5) {
zetafromx #
# Given a sequence xd, a vector of scale factors cs and
# a lower limit pilo, find the marginal maximum likelihood
# estimate of the parameter zeta such that the prior prob
# is of the form median( pilo, zeta*cs, 1)
#
# If pilo=NA then it is calculated according to the sample size
# to correspond to the universal threshold
#
# Find the beta values and the minimum weight if necessary
#
# Current version allows for standard deviation of 1 only.
#
<- substring(prior, 1, 1)
pr <- length(xd)
nx if (is.na(pilo))
<- wfromt(sqrt(2 * log(length(xd))), prior=prior, a=a)
pilo if(pr == "l")
<- beta.laplace(xd, a=a)
beta if(pr == "c") beta <- beta.cauchy(xd)
#
# Find jump points zj in derivative of log likelihood as function
# of z, and other preliminary calculations
#
<- pilo/cs
zs1 <- 1/cs
zs2 <- sort(unique(c(zs1, zs2)))
zj <- cs * beta
cb <- length(zj)
mz <- NULL
zlmax
# Find left and right derivatives at each zj and check which are
# local minima Check internal zj first
<- rep(FALSE, mz)
lmin for(j in (2:(mz - 1))) {
<- zj[j]
ze <- cb[(ze > zs1) & (ze <= zs2)]
cbil <- sum(cbil/(1 + ze * cbil))
ld if(ld <= 0) {
<- cb[(ze >= zs1) & (ze < zs2)]
cbir <- sum(cbir/(1 + ze * cbir))
rd <- (rd >= 0)
lmin[j]
}
}#
# Deal with the two end points in turn, finding right deriv
# at lower end and left deriv at upper
#
# In each case the corresponding end point is either a local min
# or a local max depending on the sign of the relevant deriv
#
<- cb[zj[1] == zs1]
cbir <- sum(cbir/(1 + zj[1] * cbir))
rd if(rd > 0) lmin[1] <- TRUE else zlmax <- zj[1]
<- cb[zj[mz] == zs2]
cbil <- sum(cbil/(1 + zj[mz] * cbil))
ld if(ld < 0) lmin[mz] <- TRUE else zlmax <- zj[mz]
# Flag all local minima and do a binary search between them to find
# the local maxima
#
<- zj[lmin]
zlmin <- length(zlmin)
nlmin if(nlmin >= 2) for(j in (2:nlmin)) {
<- zlmin[j - 1]
zlo <- zlmin[j]
zhi <- (zlo + zhi)/2
ze <- (zhi - zlo)/2
zstep for(nit in (1:10)) {
<- cb[(ze >= zs1) & (ze <= zs2)]
cbi <- sum(cbi/(1 + ze * cbi))
likd <- zstep/2
zstep <- ze + zstep * sign(likd)
ze
}<- c(zlmax, ze)
zlmax
}#
# Evaluate all local maxima and find global max; use smaller value
# if there is an exact tie for the global maximum.
#
<- length(zlmax)
nlmax <- rep(NA, nlmax)
zm for(j in (1:nlmax)) {
<- pmax(zs1, pmin(zlmax[j], zs2))
pz <- sum(log(1 + cb * pz))
zm[j]
}<- zlmax[zm == max(zm)]
zeta <- min(zeta)
zeta <- pmin(1, pmax(zeta*cs, pilo))
w return(list(zeta=zeta, w=w, cs=cs, pilo=pilo))
}
결과
- Python
-2,1,0,-4,8,50]),torch.tensor([2,3,5,6,1,-1])) ebayesthresh_torch.zetafromx(torch.tensor([
{'zeta': 1.0,
'w': tensor([1.000000000000000, 1.000000000000000, 1.000000000000000,
1.000000000000000, 1.000000000000000, 0.412115037441254]),
'cs': tensor([ 2, 3, 5, 6, 1, -1]),
'pilo': tensor(0.412115022681705, dtype=torch.float64)}
- R
> zetafromx(c(-2,1,0,-4,8,50),c(2,3,5,6,1,-1))
$zeta
1] 1
[
$w
1] 1.000000 1.000000 1.000000 1.000000 1.000000 0.412115
[
$cs
1] 2 3 5 6 1 -1
[
$pilo
1] 0.412115 [
Layer
import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
def make_Psi(T):
    """Build the graph-Fourier basis for a path graph of length T.

    Constructs the adjacency matrix W of the 1-D chain graph (nodes i and j
    are connected iff |i - j| == 1), forms the symmetric normalized Laplacian
    L = D^{-1/2} (D - W) D^{-1/2}, and returns its eigenvector matrix Psi
    (columns ordered by ascending eigenvalue, as produced by torch.linalg.eigh).

    Parameters
    ----------
    T : int
        Number of graph nodes (signal length). Must be >= 2 so every node
        has at least one neighbour (degree > 0 keeps D^{-1/2} finite).

    Returns
    -------
    torch.Tensor
        (T, T) orthonormal eigenvector matrix Psi, detached from autograd.
    """
    # Vectorized tri-diagonal adjacency: 1 where |i - j| == 1, else 0.
    # Replaces the original O(T^2) Python double loop that allocated a
    # tensor per element; the diagonal is automatically 0 here.
    idx = torch.arange(T)
    W = ((idx.unsqueeze(0) - idx.unsqueeze(1)).abs() == 1).to(torch.float32)
    # Node degrees; no requires_grad_ — the original flagged d for autograd
    # and then immediately detached the result, so gradients were never used.
    d = W.sum(dim=1)
    D = torch.diag(d)
    d_inv_sqrt = torch.diag(1.0 / torch.sqrt(d))
    # Symmetric normalized graph Laplacian.
    L = d_inv_sqrt @ (D - W) @ d_inv_sqrt
    # eigh returns (eigenvalues, eigenvectors) for the symmetric matrix L.
    _, Psi = torch.linalg.eigh(L)
    return Psi.detach()
= 100
T = torch.arange(T)/T * 10
t = 3*torch.sin(0.5*t) + 1.2*torch.sin(1.0*t) + 0.5*torch.sin(1.2*t)
y_true = y_true + torch.randn(T)
y =(10,6))
plt.figure(figsize plt.plot(t,y_true)
'o')
plt.plot(t,y,'--') plt.plot(t,y_true,
= y.clone().detach().requires_grad_(True)
f if len(f.shape)==1: f = f.reshape(-1,1)
= f.shape
T,N = make_Psi(T)
Psi = Psi.T @ f # apply dft fbar
**2) # periodogram plt.plot(t,fbar.detach()
= ebayesthresh_torch.ebayesthresh(fbar.detach()[:,0]) fbar_threshed
# fbar_threshed = np.stack([ebayesthresh(FloatVector(fbar[:,i])) for i in range(N)],axis=1)
**2)) # periodogram
plt.plot((fbar.detach()**2)) plt.plot((fbar_threshed
**2)[20:80]) # periodogram
plt.plot((fbar.detach()**2)[20:80]) plt.plot((fbar_threshed
= Psi @ fbar_threshed.float() # inverse dft
yhat =(10,6))
plt.figure(figsize'.')
plt.plot(t,y,'--')
plt.plot(t,y_true, plt.plot(t,yhat.detach())
class ebayesthresh_nn(torch.nn.Module):
    """Parameter-free nn.Module applying empirical Bayes thresholding.

    Thin layer wrapper so ebayesthresh can be dropped into an nn.Sequential
    pipeline; it holds no learnable parameters or state.
    """

    def forward(self, input):
        # Delegate directly to the functional implementation.
        return ebayesthresh_torch.ebayesthresh(input)
= ebayesthresh_nn() thresh_layer
0]) plt.plot(fbar.detach()[:,
0])) plt.plot(thresh_layer(fbar.detach()[:,