Skip to content

Commit

Permalink
Support multi-threading for ranking evaluation
Browse files Browse the repository at this point in the history
Top-k recommendation for every single user is costly. It's
recommended to parallelize whenever possible.
  • Loading branch information
takuti committed Apr 3, 2022
1 parent 2b6e9cf commit 45025aa
Showing 1 changed file with 4 additions and 4 deletions.
8 changes: 4 additions & 4 deletions src/evaluation/evaluate.jl
Original file line number Diff line number Diff line change
Expand Up @@ -21,17 +21,17 @@ function evaluate(recommender::Recommender, truth_data::DataAccessor,
validate(recommender, truth_data)
n_users, n_items = size(truth_data.R)

accum = 0.0
accum = Threads.Atomic{Float64}(0.0)

for u in 1:n_users
Threads.@threads for u in 1:n_users
observed_items = findall(!iszero, truth_data.R[u, :])
if length(observed_items) == 0; continue; end
truth = [first(t) for t in sort(collect(zip(observed_items, truth_data.R[u, observed_items])), by=t->last(t), rev=true)]
candidates = findall(iszero, recommender.data.R[u, :]) # items that were unobserved as of building the model
pred = [first(item_score_pair) for item_score_pair in recommend(recommender, u, topk, candidates)]
accum += measure(metric, truth, pred, topk)
Threads.atomic_add!(accum, measure(metric, truth, pred, topk))
end

# return average accuracy over the all target users
accum / n_users
accum[] / n_users
end

0 comments on commit 45025aa

Please sign in to comment.