- r - 以节省内存的方式增长 data.frame
- ruby-on-rails - ruby/ruby on rails 内存泄漏检测
- android - 无法解析导入android.support.v7.app
- UNIX 域套接字与共享内存(映射文件)
我正在编写一个使用 Gibbs 采样的贝叶斯推理包。由于这些方法通常在计算上很昂贵,因此我非常关心我的代码的性能。事实上,速度是我从 Python 转到 Julia 的原因。
在实现了 Dirichlet Process Mixture Model 之后,我使用 Coverage.jl 以及 --track-allocation=user
命令行选项对代码进行了分析。
这是覆盖结果
- #=
- DPM
-
- Dirichlet Process Mixture Models
-
- 25/08/2015
- Adham Beyki, odinay@gmail.com
-
- =#
-
- type DPM{T}
- bayesian_component::T
- K::Int64
- aa::Float64
- a1::Float64
- a2::Float64
- K_hist::Vector{Int64}
- K_zz_dict::Dict{Int64, Vector{Int64}}
-
- DPM{T}(c::T, K::Int64, aa::Float64, a1::Float64, a2::Float64) = new(c, K, aa, a1, a2,
- Int64[], (Int64 => Vector{Int64})[])
- end
1 DPM{T}(c::T, K::Int64, aa::Real, a1::Real, a2::Real) = DPM{typeof(c)}(c, K, convert(Float64, aa),
- convert(Float64, a1), convert(Float64, a2))
-
- function Base.show(io::IO, dpm::DPM)
- println(io, "Dirichlet Mixture Model with $(dpm.K) $(typeof(dpm.bayesian_component)) components")
- end
-
- function initialize_gibbs_sampler!(dpm::DPM, zz::Vector{Int64})
- # populates the cluster labels randomly
1 zz[:] = rand(1:dpm.K, length(zz))
- end
-
- function DPM_sample_hyperparam(aa::Float64, a1::Float64, a2::Float64, K::Int64, NN::Int64, iters::Int64)
-
- # resampling concentration parameter based on Escobar and West 1995
352 for n = 1:iters
3504 eta = rand(Distributions.Beta(aa+1, NN))
3504 rr = (a1+K-1) / (NN*(a2-log(NN)))
3504 pi_eta = rr / (1+rr)
-
3504 if rand() < pi_eta
0 aa = rand(Distributions.Gamma(a1+K, 1/(a2-log(eta))))
- else
3504 aa = rand(Distributions.Gamma(a1+K-1, 1/(a2-log(eta))))
- end
- end
352 aa
- end
-
- function DPM_sample_pp{T1, T2}(
- bayesian_components::Vector{T1},
- xx::T2,
- nn::Vector{Float64},
- pp::Vector{Float64},
- aa::Float64)
-
1760000 K = length(nn)
1760000 @inbounds for kk = 1:K
11384379 pp[kk] = log(nn[kk]) + logpredictive(bayesian_components[kk], xx)
- end
1760000 pp[K+1] = log(aa) + logpredictive(bayesian_components[K+1], xx)
1760000 normalize_pp!(pp, K+1)
1760000 return sample(pp[1:K+1])
- end
-
-
- function collapsed_gibbs_sampler!{T1, T2}(
- dpm::DPM{T1},
- xx::Vector{T2},
- zz::Vector{Int64},
- n_burnins::Int64, n_lags::Int64, n_samples::Int64, n_internals::Int64; max_clusters::Int64=100)
-
-
2 NN = length(xx) # number of data points
2 nn = zeros(Float64, dpm.K) # count array
2 n_iterations = n_burnins + (n_samples)*(n_lags+1)
2 bayesian_components = [deepcopy(dpm.bayesian_component) for k = 1:dpm.K+1]
2 dpm.K_hist = zeros(Int64, n_iterations)
2 pp = zeros(Float64, max_clusters)
-
2 tic()
2 for ii = 1:NN
10000 kk = zz[ii]
10000 additem(bayesian_components[kk], xx[ii])
10000 nn[kk] += 1
- end
2 dpm.K_hist[1] = dpm.K
2 elapsed_time = toq()
-
2 for iteration = 1:n_iterations
-
352 println("iteration: $iteration, KK: $(dpm.K), KK mode: $(indmax(hist(dpm.K_hist,
- 0.5:maximum(dpm.K_hist)+0.5)[2])), elapsed time: $elapsed_time")
-
352 tic()
352 @inbounds for ii = 1:NN
1760000 kk = zz[ii]
1760000 nn[kk] -= 1
1760000 delitem(bayesian_components[kk], xx[ii])
-
- # remove the cluster if empty
1760000 if nn[kk] == 0
166 println("\tcomponent $kk has become inactive")
166 splice!(nn, kk)
166 splice!(bayesian_components, kk)
166 dpm.K -= 1
-
- # shifting the labels one cluster back
830166 idx = find(x -> x>kk, zz)
166 zz[idx] -= 1
- end
-
1760000 kk = DPM_sample_pp(bayesian_components, xx[ii], nn, pp, dpm.aa)
-
1760000 if kk == dpm.K+1
171 println("\tcomponent $kk activated.")
171 push!(bayesian_components, deepcopy(dpm.bayesian_component))
171 push!(nn, 0)
171 dpm.K += 1
- end
-
1760000 zz[ii] = kk
1760000 nn[kk] += 1
1760000 additem(bayesian_components[kk], xx[ii])
- end
-
352 dpm.aa = DPM_sample_hyperparam(dpm.aa, dpm.a1, dpm.a2, dpm.K, NN, n_internals)
352 dpm.K_hist[iteration] = dpm.K
352 dpm.K_zz_dict[dpm.K] = deepcopy(zz)
352 elapsed_time = toq()
- end
- end
-
- function truncated_gibbs_sampler{T1, T2}(dpm::DPM{T1}, xx::Vector{T2}, zz::Vector{Int64},
- n_burnins::Int64, n_lags::Int64, n_samples::Int64, n_internals::Int64, K_truncation::Int64)
-
- NN = length(xx) # number of data points
- nn = zeros(Int64, K_truncation) # count array
- bayesian_components = [deepcopy(dpm.bayesian_component) for k = 1:K_truncation]
- n_iterations = n_burnins + (n_samples)*(n_lags+1)
- dpm.K_hist = zeros(Int64, n_iterations)
- states = (ASCIIString => Int64)[]
- n_states = 0
-
- tic()
- for ii = 1:NN
- kk = zz[ii]
- additem(bayesian_components[kk], xx[ii])
- nn[kk] += 1
- end
- dpm.K_hist[1] = dpm.K
-
- # constructing the sticks
- beta_VV = rand(Distributions.Beta(1.0, dpm.aa), K_truncation)
- beta_VV[end] = 1.0
- π = ones(Float64, K_truncation)
- π[2:end] = 1 - beta_VV[1:K_truncation-1]
- π = log(beta_VV) + log(cumprod(π))
-
- elapsed_time = toq()
-
- for iteration = 1:n_iterations
-
- println("iteration: $iteration, # active components: $(length(findn(nn)[1])), mode: $(indmax(hist(dpm.K_hist,
- 0.5:maximum(dpm.K_hist)+0.5)[2])), elapsed time: $elapsed_time \n", nn)
-
- tic()
- for ii = 1:NN
- kk = zz[ii]
- nn[kk] -= 1
- delitem(bayesian_components[kk], xx[ii])
-
- # resampling label
- pp = zeros(Float64, K_truncation)
- for kk = 1:K_truncation
- pp[kk] = π[kk] + logpredictive(bayesian_components[kk], xx[ii])
- end
- pp = exp(pp - maximum(pp))
- pp /= sum(pp)
-
- # sample from pp
- kk = sampleindex(pp)
- zz[ii] = kk
- nn[kk] += 1
- additem(bayesian_components[kk], xx[ii])
-
- for kk = 1:K_truncation-1
- gamma1 = 1 + nn[kk]
- gamma2 = dpm.aa + sum(nn[kk+1:end])
- beta_VV[kk] = rand(Distributions.Beta(gamma1, gamma2))
- end
- beta_VV[end] = 1.0
- π = ones(Float64, K_truncation)
- π[2:end] = 1 - beta_VV[1:K_truncation-1]
- π = log(beta_VV) + log(cumprod(π))
-
- # resampling concentration parameter based on Escobar and West 1995
- for internal_iters = 1:n_internals
- eta = rand(Distributions.Beta(dpm.aa+1, NN))
- rr = (dpm.a1+dpm.K-1) / (NN*(dpm.a2-log(NN)))
- pi_eta = rr / (1+rr)
-
- if rand() < pi_eta
- dpm.aa = rand(Distributions.Gamma(dpm.a1+dpm.K, 1/(dpm.a2-log(eta))))
- else
- dpm.aa = rand(Distributions.Gamma(dpm.a1+dpm.K-1, 1/(dpm.a2-log(eta))))
- end
- end
- end
-
- nn_string = nn2string(nn)
- if !haskey(states, nn_string)
- n_states += 1
- states[nn_string] = n_states
- end
- dpm.K_hist[iteration] = states[nn_string]
- dpm.K_zz_dict[states[nn_string]] = deepcopy(zz)
- elapsed_time = toq()
- end
- return states
- end
-
-
- function posterior{T1, T2}(dpm::DPM{T1}, xx::Vector{T2}, K::Int64, K_truncation::Int64=0)
2 n_components = 0
1 if K_truncation == 0
1 n_components = K
- else
0 n_components = K_truncation
- end
-
1 bayesian_components = [deepcopy(dpm.bayesian_component) for kk=1:n_components]
1 zz = dpm.K_zz_dict[K]
-
1 NN = length(xx)
1 nn = zeros(Int64, n_components)
-
1 for ii = 1:NN
5000 kk = zz[ii]
5000 additem(bayesian_components[kk], xx[ii])
5000 nn[kk] += 1
- end
-
1 return([posterior(bayesian_components[kk]) for kk=1:n_components], nn)
- end
-
这是内存分配:
- #=
- DPM
-
- Dirichlet Process Mixture Models
-
- 25/08/2015
- Adham Beyki, odinay@gmail.com
-
- =#
-
- type DPM{T}
- bayesian_component::T
- K::Int64
- aa::Float64
- a1::Float64
- a2::Float64
- K_hist::Vector{Int64}
- K_zz_dict::Dict{Int64, Vector{Int64}}
-
- DPM{T}(c::T, K::Int64, aa::Float64, a1::Float64, a2::Float64) = new(c, K, aa, a1, a2,
- Int64[], (Int64 => Vector{Int64})[])
- end
0 DPM{T}(c::T, K::Int64, aa::Real, a1::Real, a2::Real) = DPM{typeof(c)}(c, K, convert(Float64, aa),
- convert(Float64, a1), convert(Float64, a2))
-
- function Base.show(io::IO, dpm::DPM)
- println(io, "Dirichlet Mixture Model with $(dpm.K) $(typeof(dpm.bayesian_component)) components")
- end
-
- function initialize_gibbs_sampler!(dpm::DPM, zz::Vector{Int64})
- # populates the cluster labels randomly
0 zz[:] = rand(1:dpm.K, length(zz))
- end
-
- function DPM_sample_hyperparam(aa::Float64, a1::Float64, a2::Float64, K::Int64, NN::Int64, iters::Int64)
-
- # resampling concentration parameter based on Escobar and West 1995
0 for n = 1:iters
0 eta = rand(Distributions.Beta(aa+1, NN))
0 rr = (a1+K-1) / (NN*(a2-log(NN)))
0 pi_eta = rr / (1+rr)
-
0 if rand() < pi_eta
0 aa = rand(Distributions.Gamma(a1+K, 1/(a2-log(eta))))
- else
0 aa = rand(Distributions.Gamma(a1+K-1, 1/(a2-log(eta))))
- end
- end
0 aa
- end
-
- function DPM_sample_pp{T1, T2}(
- bayesian_components::Vector{T1},
- xx::T2,
- nn::Vector{Float64},
- pp::Vector{Float64},
- aa::Float64)
-
0 K = length(nn)
0 @inbounds for kk = 1:K
0 pp[kk] = log(nn[kk]) + logpredictive(bayesian_components[kk], xx)
- end
0 pp[K+1] = log(aa) + logpredictive(bayesian_components[K+1], xx)
0 normalize_pp!(pp, K+1)
0 return sample(pp[1:K+1])
- end
-
-
- function collapsed_gibbs_sampler!{T1, T2}(
- dpm::DPM{T1},
- xx::Vector{T2},
- zz::Vector{Int64},
- n_burnins::Int64, n_lags::Int64, n_samples::Int64, n_internals::Int64; max_clusters::Int64=100)
-
-
191688 NN = length(xx) # number of data points
96 nn = zeros(Float64, dpm.K) # count array
0 n_iterations = n_burnins + (n_samples)*(n_lags+1)
384 bayesian_components = [deepcopy(dpm.bayesian_component) for k = 1:dpm.K+1]
2864 dpm.K_hist = zeros(Int64, n_iterations)
176 pp = zeros(Float64, max_clusters)
-
48 tic()
0 for ii = 1:NN
0 kk = zz[ii]
0 additem(bayesian_components[kk], xx[ii])
0 nn[kk] += 1
- end
0 dpm.K_hist[1] = dpm.K
0 elapsed_time = toq()
-
0 for iteration = 1:n_iterations
-
5329296 println("iteration: $iteration, KK: $(dpm.K), KK mode: $(indmax(hist(dpm.K_hist,
- 0.5:maximum(dpm.K_hist)+0.5)[2])), elapsed time: $elapsed_time")
-
16800 tic()
28000000 @inbounds for ii = 1:NN
0 kk = zz[ii]
0 nn[kk] -= 1
0 delitem(bayesian_components[kk], xx[ii])
-
- # remove the cluster if empty
0 if nn[kk] == 0
161880 println("\tcomponent $kk has become inactive")
0 splice!(nn, kk)
0 splice!(bayesian_components, kk)
0 dpm.K -= 1
-
- # shifting the labels one cluster back
69032 idx = find(x -> x>kk, zz)
42944 zz[idx] -= 1
- end
-
0 kk = DPM_sample_pp(bayesian_components, xx[ii], nn, pp, dpm.aa)
-
0 if kk == dpm.K+1
158976 println("\tcomponent $kk activated.")
14144 push!(bayesian_components, deepcopy(dpm.bayesian_component))
4872 push!(nn, 0)
0 dpm.K += 1
- end
-
0 zz[ii] = kk
0 nn[kk] += 1
0 additem(bayesian_components[kk], xx[ii])
- end
-
0 dpm.aa = DPM_sample_hyperparam(dpm.aa, dpm.a1, dpm.a2, dpm.K, NN, n_internals)
0 dpm.K_hist[iteration] = dpm.K
14140000 dpm.K_zz_dict[dpm.K] = deepcopy(zz)
0 elapsed_time = toq()
- end
- end
-
- function truncated_gibbs_sampler{T1, T2}(dpm::DPM{T1}, xx::Vector{T2}, zz::Vector{Int64},
- n_burnins::Int64, n_lags::Int64, n_samples::Int64, n_internals::Int64, K_truncation::Int64)
-
- NN = length(xx) # number of data points
- nn = zeros(Int64, K_truncation) # count array
- bayesian_components = [deepcopy(dpm.bayesian_component) for k = 1:K_truncation]
- n_iterations = n_burnins + (n_samples)*(n_lags+1)
- dpm.K_hist = zeros(Int64, n_iterations)
- states = (ASCIIString => Int64)[]
- n_states = 0
-
- tic()
- for ii = 1:NN
- kk = zz[ii]
- additem(bayesian_components[kk], xx[ii])
- nn[kk] += 1
- end
- dpm.K_hist[1] = dpm.K
-
- # constructing the sticks
- beta_VV = rand(Distributions.Beta(1.0, dpm.aa), K_truncation)
- beta_VV[end] = 1.0
- π = ones(Float64, K_truncation)
- π[2:end] = 1 - beta_VV[1:K_truncation-1]
- π = log(beta_VV) + log(cumprod(π))
-
- elapsed_time = toq()
-
- for iteration = 1:n_iterations
-
- println("iteration: $iteration, # active components: $(length(findn(nn)[1])), mode: $(indmax(hist(dpm.K_hist,
- 0.5:maximum(dpm.K_hist)+0.5)[2])), elapsed time: $elapsed_time \n", nn)
-
- tic()
- for ii = 1:NN
- kk = zz[ii]
- nn[kk] -= 1
- delitem(bayesian_components[kk], xx[ii])
-
- # resampling label
- pp = zeros(Float64, K_truncation)
- for kk = 1:K_truncation
- pp[kk] = π[kk] + logpredictive(bayesian_components[kk], xx[ii])
- end
- pp = exp(pp - maximum(pp))
- pp /= sum(pp)
-
- # sample from pp
- kk = sampleindex(pp)
- zz[ii] = kk
- nn[kk] += 1
- additem(bayesian_components[kk], xx[ii])
-
- for kk = 1:K_truncation-1
- gamma1 = 1 + nn[kk]
- gamma2 = dpm.aa + sum(nn[kk+1:end])
- beta_VV[kk] = rand(Distributions.Beta(gamma1, gamma2))
- end
- beta_VV[end] = 1.0
- π = ones(Float64, K_truncation)
- π[2:end] = 1 - beta_VV[1:K_truncation-1]
- π = log(beta_VV) + log(cumprod(π))
-
- # resampling concentration parameter based on Escobar and West 1995
- for internal_iters = 1:n_internals
- eta = rand(Distributions.Beta(dpm.aa+1, NN))
- rr = (dpm.a1+dpm.K-1) / (NN*(dpm.a2-log(NN)))
- pi_eta = rr / (1+rr)
-
- if rand() < pi_eta
- dpm.aa = rand(Distributions.Gamma(dpm.a1+dpm.K, 1/(dpm.a2-log(eta))))
- else
- dpm.aa = rand(Distributions.Gamma(dpm.a1+dpm.K-1, 1/(dpm.a2-log(eta))))
- end
- end
- end
-
- nn_string = nn2string(nn)
- if !haskey(states, nn_string)
- n_states += 1
- states[nn_string] = n_states
- end
- dpm.K_hist[iteration] = states[nn_string]
- dpm.K_zz_dict[states[nn_string]] = deepcopy(zz)
- elapsed_time = toq()
- end
- return states
- end
-
-
- function posterior{T1, T2}(dpm::DPM{T1}, xx::Vector{T2}, K::Int64, K_truncation::Int64=0)
0 n_components = 0
0 if K_truncation == 0
0 n_components = K
- else
0 n_components = K_truncation
- end
-
0 bayesian_components = [deepcopy(dpm.bayesian_component) for kk=1:n_components]
0 zz = dpm.K_zz_dict[K]
-
0 NN = length(xx)
0 nn = zeros(Int64, n_components)
-
0 for ii = 1:NN
0 kk = zz[ii]
0 additem(bayesian_components[kk], xx[ii])
0 nn[kk] += 1
- end
-
0 return([posterior(bayesian_components[kk]) for kk=1:n_components], nn)
- end
-
我似乎不明白,为什么例如一个只执行了两次的简单赋值语句会被报告分配了 191688 个单位(我假设单位是字节,但我不确定)。
.cov:
2 NN = length(xx) # number of data points
.mem:
191688 NN = length(xx) # number of data points
或者这个更糟:
.cov:
352 @inbounds for ii = 1:NN
.mem:
28000000 @inbounds for ii = 1:NN
最佳答案
答案在文档(in the docs)中有简单提及:“在用户设置下,直接从 REPL 调用的任何函数的第一行都将显示由于 REPL 代码本身发生的事件而产生的分配。”也可能相关:“更重要的是,JIT 编译还会增加分配计数,因为 Julia 的大部分编译器都是用 Julia 编写的(而编译通常需要内存分配)。推荐的做法是先执行所有你想要分析的命令以强制完成编译,然后调用 Profile.clear_malloc_data() 重置所有分配计数器。”
底线:第一行被指责为其他地方发生的分配,因为它是重新开始报告分配的第一行。
关于memory - 如何分析 Julia 内存分配和代码覆盖结果,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/32520295/
我尝试理解[c代码 -> 汇编]代码 void node::Check( data & _data1, vector& _data2) { -> push ebp -> mov ebp,esp ->
我需要在当前表单(代码)的上下文中运行文本文件中的代码。其中一项要求是让代码创建新控件并将其添加到当前窗体。 例如,在Form1.cs中: using System.Windows.Forms; ..
我有此 C++ 代码并将其转换为 C# (.net Framework 4) 代码。有没有人给我一些关于 malloc、free 和 sprintf 方法的提示? int monate = ee; d
我的网络服务器代码有问题 #include #include #include #include #include #include #include int
给定以下 html 代码,将列表中的第三个元素(即“美丽”一词)以斜体显示的 CSS 代码是什么?当然,我可以给这个元素一个 id 或一个 class,但 html 代码必须保持不变。谢谢
关闭。这个问题不符合Stack Overflow guidelines .它目前不接受答案。 我们不允许提问寻求书籍、工具、软件库等的推荐。您可以编辑问题,以便用事实和引用来回答。 关闭 7 年前。
我试图制作一个宏来避免重复代码和注释。 我试过这个: #define GrowOnPage(any Page, any Component) Component.Width := Page.Surfa
我正在尝试将我的旧 C++ 代码“翻译”成头条新闻所暗示的 C# 代码。问题是我是 C# 中的新手,并不是所有的东西都像 C++ 中那样。在 C++ 中这些解决方案运行良好,但在 C# 中只是不能。我
在 Windows 10 上工作,R 语言的格式化程序似乎没有在 Visual Studio Code 中完成它的工作。我试过R support for Visual Studio Code和 R-T
我正在处理一些报告(计数),我必须获取不同参数的计数。非常简单但乏味。 一个参数的示例查询: qCountsEmployee = ( "select count(*) from %s wher
最近几天我尝试从 d00m 调试网络错误。我开始用尽想法/线索,我希望其他 SO 用户拥有可能有用的宝贵经验。我希望能够提供所有相关信息,但我个人无法控制服务器环境。 整个事情始于用户注意到我们应用程
我有一个 app.js 文件,其中包含如下 dojo amd 模式代码: require(["dojo/dom", ..], function(dom){ dom.byId('someId').i
我对“-gencode”语句中的“code=sm_X”选项有点困惑。 一个例子:NVCC 编译器选项有什么作用 -gencode arch=compute_13,code=sm_13 嵌入库中? 只有
我为我的表格使用 X-editable 框架。 但是我有一些问题。 $(document).ready(function() { $('.access').editable({
我一直在通过本教程学习 flask/python http://blog.miguelgrinberg.com/post/the-flask-mega-tutorial-part-i-hello-wo
我想将 Vim 和 EMACS 用于 CNC、G 代码和 M 代码。 Vim 或 EMACS 是否有任何语法或模式来处理这种类型的代码? 最佳答案 一些快速搜索使我找到了 this vim 和 thi
关闭。这个问题不符合Stack Overflow guidelines .它目前不接受答案。 想改进这个问题?更新问题,使其成为 on-topic对于堆栈溢出。 7年前关闭。 Improve this
这个问题在这里已经有了答案: Enabling markdown highlighting in Vim (5 个回答) 6年前关闭。 当我在 Vim 中编辑包含 Markdown 代码的 READM
我正在 Swift3 iOS 中开发视频应用程序。基本上我必须将视频 Assets 和音频与淡入淡出效果合并为一个并将其保存到 iPhone 画廊。为此,我使用以下方法: private func d
pipeline { agent any stages { stage('Build') { steps { e
我是一名优秀的程序员,十分优秀!