From 850584dc118e90f3056c2755a94a1342ebf76ed2 Mon Sep 17 00:00:00 2001 From: ricardobcl Date: Tue, 10 Apr 2012 11:39:37 +0100 Subject: [PATCH 01/25] First working commit, with dotted version vectors. --- src/riak_kv_get_core.erl | 4 +- src/riak_kv_get_fsm.erl | 3 +- src/riak_kv_util.erl | 2 +- src/riak_kv_vnode.erl | 73 ++++++------ src/riak_kv_wm_object.erl | 10 +- src/riak_kv_wm_utils.erl | 3 +- src/riak_object.erl | 236 +++++++++++++++++++++----------------- 7 files changed, 181 insertions(+), 150 deletions(-) diff --git a/src/riak_kv_get_core.erl b/src/riak_kv_get_core.erl index a47bc4454d..41503f8719 100644 --- a/src/riak_kv_get_core.erl +++ b/src/riak_kv_get_core.erl @@ -199,8 +199,8 @@ info(#getcore{num_ok = NumOks, num_fail = NumFail, results = Results}) -> %% ==================================================================== strict_descendant(O1, O2) -> - vclock:descends(riak_object:vclock(O1),riak_object:vclock(O2)) andalso - not vclock:descends(riak_object:vclock(O2),riak_object:vclock(O1)). + dottedvv:descends(riak_object:vclock(O1),riak_object:vclock(O2)) andalso + not dottedvv:descends(riak_object:vclock(O2),riak_object:vclock(O1)). merge(Replies, AllowMult) -> RObjs = [RObj || {_I, {ok, RObj}} <- Replies], diff --git a/src/riak_kv_get_fsm.erl b/src/riak_kv_get_fsm.erl index 1bd842bd21..64c865d153 100644 --- a/src/riak_kv_get_fsm.erl +++ b/src/riak_kv_get_fsm.erl @@ -353,8 +353,7 @@ update_stats({ok, Obj}, #state{get_usecs = GetUsecs}) -> ObjSize = size(riak_object:bucket(Obj)) + size(riak_object:key(Obj)) + - size(term_to_binary(riak_object:vclock(Obj))) + - lists:sum([size(term_to_binary(MD)) + value_size(Value) || {MD, Value} <- Contents]), + lists:sum([size(term_to_binary(MD)) + value_size(Value) + size(term_to_binary(Clock)) || {MD, Value, Clock} <- Contents]), riak_kv_stat:update({get_fsm, undefined, GetUsecs, NumSiblings, ObjSize}); update_stats(_, #state{get_usecs = GetUsecs}) -> riak_kv_stat:update({get_fsm, undefined, GetUsecs, undefined, undefined}). diff --git a/src/riak_kv_util.erl b/src/riak_kv_util.erl index 4c145ff828..f45ca514b8 100644 --- a/src/riak_kv_util.erl +++ b/src/riak_kv_util.erl @@ -61,7 +61,7 @@ is_x_deleted(Obj) -> %% deleted. Return is the atom 'undefined' if all contents %% are marked deleted, or the input Obj if any of them are not. 
obj_not_deleted(Obj) -> - case [{M, V} || {M, V} <- riak_object:get_contents(Obj), + case [{M, V, C} || {M, V, C} <- riak_object:get_contents(Obj), dict:is_key(<<"X-Riak-Deleted">>, M) =:= false] of [] -> undefined; _ -> Obj diff --git a/src/riak_kv_vnode.erl b/src/riak_kv_vnode.erl index 14fcd940a5..8d8e0e3ece 100644 --- a/src/riak_kv_vnode.erl +++ b/src/riak_kv_vnode.erl @@ -595,7 +595,7 @@ do_put(Sender, {Bucket,_Key}=BKey, RObj, ReqID, StartTime, Options, State) -> PruneTime = StartTime end, Coord = proplists:get_value(coord, Options, false), - PutArgs = #putargs{returnbody=proplists:get_value(returnbody,Options,false) orelse Coord, + PutArgs = #putargs{returnbody=Coord orelse proplists:get_value(returnbody,Options,false), coord=Coord, lww=proplists:get_value(last_write_wins, BProps, false), bkey=BKey, @@ -664,7 +664,7 @@ prepare_put(#state{vnodeid=VId, coord=Coord, lww=LWW, starttime=StartTime, - prunetime=PruneTime}, + prunetime=_PruneTime}, IndexBackend) -> case Mod:get(Bucket, Key, ModState) of {error, not_found, _UpdModState} -> @@ -687,7 +687,7 @@ prepare_put(#state{vnodeid=VId, {oldobj, OldObj1} -> {{false, OldObj1}, PutArgs}; {newobj, NewObj} -> - VC = riak_object:vclock(NewObj), +% VC = riak_object:vclock(NewObj), AMObj = enforce_allow_mult(NewObj, BProps), case IndexBackend of true -> @@ -697,17 +697,17 @@ prepare_put(#state{vnodeid=VId, false -> IndexSpecs = [] end, - case PruneTime of - undefined -> - ObjToStore = AMObj; - _ -> - ObjToStore = - riak_object:set_vclock(AMObj, - vclock:prune(VC, - PruneTime, - BProps)) - end, - {{true, ObjToStore}, +% case PruneTime of +% undefined -> +% ObjToStore = AMObj; +% _ -> +% ObjToStore = +% riak_object:set_vclock(AMObj, +% vclock:prune(VC, +% PruneTime, +% BProps)) +% end, + {{true, AMObj}, PutArgs#putargs{index_specs=IndexSpecs}} end end. @@ -754,8 +754,10 @@ enforce_allow_mult(Obj, BProps) -> case riak_object:get_contents(Obj) of [_] -> Obj; Mult -> - {MD, V} = select_newest_content(Mult), - riak_object:set_contents(Obj, [{MD, V}]) + Clocks = [C || {_,_,C} <- Mult], + Clock = dottedvv:merge(Clocks), + {MD, V, _VC} = select_newest_content(Mult), + riak_object:set_contents(Obj, [{MD, V, Clock}]) end end. 
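The enforce_allow_mult/2 clause above collapses siblings for allow_mult=false buckets by joining all of the sibling clocks with dottedvv:merge/1 before keeping the newest content, so the surviving value causally dominates everything it replaces and a later read-repair cannot resurrect the discarded siblings. The dottedvv module itself is not part of this patch; as a rough sketch of the join it is assumed to perform, over a hypothetical plain orddict of Actor => Counter entries (the real dotted version vector additionally carries the "dot", the most recent event, which this ignores; merge_clocks is our name, not the patch's):

    %% Sketch only: pointwise join of version vectors, keeping the
    %% larger counter per actor. Assumes a plain orddict representation;
    %% dottedvv:merge/1 in this branch is richer.
    merge_clocks([]) ->
        orddict:new();
    merge_clocks(Clocks) ->
        lists:foldl(fun(Clock, Acc) ->
                        orddict:merge(fun(_Actor, A, B) -> max(A, B) end,
                                      Clock, Acc)
                    end, orddict:new(), Clocks).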
@@ -763,7 +765,7 @@ enforce_allow_mult(Obj, BProps) ->
 %% choose the latest content to store for the allow_mult=false case
 select_newest_content(Mult) ->
     hd(lists:sort(
-           fun({MD0, _}, {MD1, _}) ->
+           fun({MD0, _, _}, {MD1, _, _}) ->
                    riak_core_util:compare_dates(
                      dict:fetch(<<"X-Riak-Last-Modified">>, MD0),
                      dict:fetch(<<"X-Riak-Last-Modified">>, MD1))
@@ -775,7 +777,8 @@ put_merge(false, true, _CurObj, UpdObj, _VId, _StartTime) -> % coord=false, LWW=
     {newobj, UpdObj};
 put_merge(false, false, CurObj, UpdObj, _VId, _StartTime) -> % coord=false, LWW=false
     ResObj = riak_object:syntactic_merge(CurObj, UpdObj),
-    case ResObj =:= CurObj of
+%    case ResObj =:= CurObj of
+    case dottedvv:equal(riak_object:vclock(ResObj), riak_object:vclock(CurObj)) of
         true ->
             {oldobj, CurObj};
         false ->
@@ -783,22 +786,24 @@ put_merge(false, false, CurObj, UpdObj, VId, StartTime) -> % coord=false, LWW=
     end;
 put_merge(true, true, _CurObj, UpdObj, VId, StartTime) -> % coord=true, LWW=true
     {newobj, riak_object:increment_vclock(UpdObj, VId, StartTime)};
-put_merge(true, false, CurObj, UpdObj, VId, StartTime) ->
-    UpdObj1 = riak_object:increment_vclock(UpdObj, VId, StartTime),
-    UpdVC = riak_object:vclock(UpdObj1),
-    CurVC = riak_object:vclock(CurObj),
-
-    %% Check the coord put will replace the existing object
-    case vclock:get_counter(VId, UpdVC) > vclock:get_counter(VId, CurVC) andalso
-        vclock:descends(CurVC, UpdVC) == false andalso
-        vclock:descends(UpdVC, CurVC) == true of
-        true ->
-            {newobj, UpdObj1};
-        false ->
-            %% If not, make sure it does
-            {newobj, riak_object:increment_vclock(
-                       riak_object:merge(CurObj, UpdObj1), VId, StartTime)}
-    end.
+put_merge(true, false, CurObj, UpdObj, VId, _StartTime) ->
+    UpdObj1 = riak_object:update_vclock(UpdObj, CurObj, VId),
+    ResObj = riak_object:syntactic_merge(CurObj, UpdObj1),
+    {newobj, ResObj}.
+%    UpdVC = riak_object:vclock(UpdObj1),
+%    CurVC = riak_object:vclock(CurObj),
+%
+%    %% Check the coord put will replace the existing object
+%    case vclock:get_counter(VId, UpdVC) > vclock:get_counter(VId, CurVC) andalso
+%        vclock:descends(CurVC, UpdVC) == false andalso
+%        vclock:descends(UpdVC, CurVC) == true of
+%        true ->
+%            {newobj, UpdObj1};
+%        false ->
+%            %% If not, make sure it does
+%            {newobj, riak_object:increment_vclock(
+%                       riak_object:merge(CurObj, UpdObj1), VId, StartTime)}
+%    end.

 %% @private
 do_get(_Sender, BKey, ReqID,
diff --git a/src/riak_kv_wm_object.erl b/src/riak_kv_wm_object.erl
index f0b4a1a907..884c1bcdef 100644
--- a/src/riak_kv_wm_object.erl
+++ b/src/riak_kv_wm_object.erl
@@ -757,14 +757,16 @@ select_doc(#ctx{doc={ok, Doc}, vtag=Vtag}) ->
     case riak_object:get_update_value(Doc) of
         undefined ->
             case riak_object:get_contents(Doc) of
-                [Single] -> Single;
+                [Single] -> {MD,V,_Clock} = Single,
+                            {MD,V};
                 Mult ->
                     case lists:dropwhile(
-                           fun({M,_}) ->
+                           fun({M,_,_}) ->
                                    dict:fetch(?MD_VTAG, M) /= Vtag
                            end,
                            Mult) of
-                        [Match|_] -> Match;
+                        [Match|_] -> {MD,V,_Clock} = Match,
+                                     {MD,V};
                         [] -> multiple_choices
                     end
             end;
@@ -797,7 +799,7 @@ encode_vclock(VClock) ->
 %%      vclock is returned.
 decode_vclock_header(RD) ->
     case wrq:get_req_header(?HEAD_VCLOCK, RD) of
-        undefined -> vclock:fresh();
+        undefined -> dottedvv:fresh();
         Head -> binary_to_term(zlib:unzip(base64:decode(Head)))
     end.
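One thing the webmachine layer above does not change is the wire encoding of the clock: it is still base64 over zlib over the Erlang external term format, which is agnostic to whether the encoded term is a vclock or a dotted version vector. That is why only the fresh-clock default needed to move from vclock:fresh() to dottedvv:fresh(). A minimal round-trip sketch (the function names here are ours, not from the patch):

    %% Minimal sketch of the X-Riak-Vclock header round-trip used above;
    %% works for any Erlang term.
    encode_clock(Clock) ->
        base64:encode_to_string(zlib:zip(term_to_binary(Clock))).

    decode_clock(HeaderValue) ->
        binary_to_term(zlib:unzip(base64:decode(HeaderValue))).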
diff --git a/src/riak_kv_wm_utils.erl b/src/riak_kv_wm_utils.erl index 78c1e45215..9331ed7adc 100644 --- a/src/riak_kv_wm_utils.erl +++ b/src/riak_kv_wm_utils.erl @@ -91,7 +91,7 @@ default_encodings() -> %% @spec multipart_encode_body(string(), binary(), {dict(), binary()}) -> iolist() %% @doc Produce one part of a multipart body, representing one sibling %% of a multi-valued document. -multipart_encode_body(Prefix, Bucket, {MD, V}, APIVersion) -> +multipart_encode_body(Prefix, Bucket, {MD, V, Clock}, APIVersion) -> Links1 = case dict:find(?MD_LINKS, MD) of {ok, Ls} -> Ls; error -> [] @@ -121,6 +121,7 @@ multipart_encode_body(Prefix, Bucket, {MD, V}, APIVersion) -> Rfc1123 end, "\r\n", + "VClock: ",encode_vclock(Clock),"\r\n", case dict:find(?MD_DELETED, MD) of {ok, "true"} -> [?HEAD_DELETED, ": true\r\n"]; diff --git a/src/riak_object.erl b/src/riak_object.erl index a4f215f5b6..8cbe62dd56 100644 --- a/src/riak_object.erl +++ b/src/riak_object.erl @@ -37,7 +37,8 @@ -record(r_content, { metadata :: dict(), - value :: term() + value :: term(), + dvvclock :: dottedvv:dottedvv() }). %% Opaque container for Riak objects, a.k.a. riak_object() @@ -45,7 +46,7 @@ bucket :: bucket(), key :: key(), contents :: [#r_content{}], - vclock = vclock:fresh() :: vclock:vclock(), +% vclock = vclock:fresh() :: vclock:vclock(), updatemetadata=dict:store(clean, true, dict:new()) :: dict(), updatevalue :: term() }). @@ -56,12 +57,12 @@ -define(MAX_KEY_SIZE, 65536). --export([new/3, new/4, ensure_robject/1, ancestors/1, reconcile/2, equal/2]). --export([increment_vclock/2, increment_vclock/3]). +-export([new/3, new/4, ensure_robject/1, equal/2, reconcile/2]). +-export([increment_vclock/2, increment_vclock/3, update_vclock/3]). -export([key/1, get_metadata/1, get_metadatas/1, get_values/1, get_value/1]). -export([vclock/1, update_value/2, update_metadata/2, bucket/1, value_count/1]). -export([get_update_metadata/1, get_update_value/1, get_contents/1]). --export([merge/2, apply_updates/1, syntactic_merge/2]). +-export([apply_updates/1, syntactic_merge/2]). -export([to_json/1, from_json/1]). -export([index_specs/1, diff_index_specs/2]). -export([set_contents/2, set_vclock/2]). %% INTERNAL, only for riak_* @@ -88,13 +89,13 @@ new(B, K, V, MD) when is_binary(B), is_binary(K) -> false -> case MD of no_initial_metadata -> - Contents = [#r_content{metadata=dict:new(), value=V}], + Contents = [#r_content{metadata=dict:new(), value=V, dvvclock = dottedvv:fresh()}], #r_object{bucket=B,key=K, - contents=Contents,vclock=vclock:fresh()}; + contents=Contents}; _ -> - Contents = [#r_content{metadata=MD, value=V}], + Contents = [#r_content{metadata=MD, value=V, dvvclock = dottedvv:fresh()}], #r_object{bucket=B,key=K,updatemetadata=MD, - contents=Contents,vclock=vclock:fresh()} + contents=Contents} end end. @@ -107,7 +108,7 @@ ensure_robject(Obj = #r_object{}) -> Obj. equal(Obj1,Obj2) -> (Obj1#r_object.bucket =:= Obj2#r_object.bucket) andalso (Obj1#r_object.key =:= Obj2#r_object.key) - andalso vclock:equal(vclock(Obj1),vclock(Obj2)) + andalso dottedvv:equal(vclock(Obj1),vclock(Obj2)) andalso equal2(Obj1,Obj2). equal2(Obj1,Obj2) -> UM1 = lists:keysort(1, dict:to_list(Obj1#r_object.updatemetadata)), @@ -127,73 +128,85 @@ equal_contents([C1|R1],[C2|R2]) -> MD2 = lists:keysort(1, dict:to_list(C2#r_content.metadata)), (MD1 =:= MD2) andalso (C1#r_content.value =:= C2#r_content.value) - andalso equal_contents(R1,R2). - -%% @spec reconcile([riak_object()], boolean()) -> riak_object() -%% @doc Reconcile a list of riak objects. 
If AllowMultiple is true,
-%%      the riak_object returned may contain multiple values if Objects
-%%      contains sibling versions (objects that could not be syntactically
-%%      merged). If AllowMultiple is false, the riak_object returned will
-%%      contain the value of the most-recently-updated object, as per the
-%%      X-Riak-Last-Modified header.
+    andalso equal_contents(R1,R2)
+    andalso dottedvv:equal(C1#r_content.dvvclock,C2#r_content.dvvclock).
+
+
+
+% @spec reconcile([riak_object()], boolean()) -> riak_object()
+% @doc Reconcile a list of riak objects.  If AllowMultiple is true,
+%      the riak_object returned may contain multiple values if Objects
+%      contains sibling versions (objects that could not be syntactically
+%      merged). If AllowMultiple is false, the riak_object returned will
+%      contain the value of the most-recently-updated object, as per the
+%      X-Riak-Last-Modified header.
 reconcile(Objects, AllowMultiple) ->
-    RObjs = reconcile(Objects),
-    AllContents = lists:flatten([O#r_object.contents || O <- RObjs]),
-    Contents = case AllowMultiple of
+    AllClocks = lists:flatten([vclock(O) || O <- Objects]),
+    AllContents = lists:flatten([ get_contents(O) || O <- Objects]),
+    NewContents = case AllowMultiple of
                    false ->
-                       [most_recent_content(AllContents)];
+                       {M, V, _} = most_recent_content(AllContents),
+                       C = dottedvv:merge(AllClocks),
+                       [#r_content{metadata=M, value=V, dvvclock=C}];
                    true ->
-                       lists:usort(AllContents)
-                   end,
-    VClock = vclock:merge([O#r_object.vclock || O <- RObjs]),
-    HdObj = hd(RObjs),
-    HdObj#r_object{contents=Contents,vclock=VClock,
+                       Sync = dottedvv:sync(AllClocks),
+                       Conts =
+                           [[#r_content{metadata=M, value=V, dvvclock=C} || {M, V, C} <- AllContents,
+                               (dottedvv:equal(C, Sclock) orelse dottedvv:descends(C, Sclock))]
+                            || Sclock <- Sync],
+                       lists:usort(lists:flatten(Conts))
+                   end,
+    HdObj = hd(Objects),
+    HdObj#r_object{contents=NewContents,
                    updatemetadata=dict:store(clean, true, dict:new()),
                    updatevalue=undefined}.

+
 %% @spec ancestors([riak_object()]) -> [riak_object()]
 %% @doc Given a list of riak_object()s, return the objects that are pure
 %%      ancestors of other objects in the list, if any.  The changes in the
 %%      objects returned by this function are guaranteed to be reflected in
 %%      the other objects in Objects, and can safely be discarded from the list
 %%      without losing data.
-ancestors(pure_baloney_to_fool_dialyzer) ->
-    [#r_object{vclock = vclock:fresh()}];
-ancestors(Objects) ->
-    ToRemove = [[O2 || O2 <- Objects,
-                       vclock:descends(O1#r_object.vclock,O2#r_object.vclock),
-                       (vclock:descends(O2#r_object.vclock,O1#r_object.vclock) == false)]
-                || O1 <- Objects],
-    lists:flatten(ToRemove).
+%ancestors(pure_baloney_to_fool_dialyzer) ->
+%    [#r_object{vclock = vclock:fresh()}];
+%ancestors(Objects) ->
+%    ToRemove = [[O2 || O2 <- Objects,
+%                       vclock:descends(O1#r_object.vclock,O2#r_object.vclock),
+%                       (vclock:descends(O2#r_object.vclock,O1#r_object.vclock) == false)]
+%                || O1 <- Objects],
+%    lists:flatten(ToRemove).
+
+%
+%%% @spec reconcile([riak_object()]) -> [riak_object()]
+%reconcile(Objects) ->
+%    All = sets:from_list(Objects),
+%    Del = sets:from_list(ancestors(Objects)),
+%    remove_duplicate_objects(sets:to_list(sets:subtract(All, Del))).
+%
+%remove_duplicate_objects(Os) -> rem_dup_objs(Os,[]).
+%rem_dup_objs([],Acc) -> Acc;
+%rem_dup_objs([O|Rest],Acc) ->
+%    EqO = [AO || AO <- Acc, riak_object:equal(AO,O) =:= true],
+%    case EqO of
+%        [] -> rem_dup_objs(Rest,[O|Acc]);
+%        _ -> rem_dup_objs(Rest,Acc)
+%    end.

 most_recent_content(AllContents) ->
     hd(lists:sort(fun compare_content_dates/2, AllContents)).

 compare_content_dates(C1,C2) ->
-    D1 = dict:fetch(<<"X-Riak-Last-Modified">>, C1#r_content.metadata),
-    D2 = dict:fetch(<<"X-Riak-Last-Modified">>, C2#r_content.metadata),
+    {M1, _, _} = C1,
+    {M2, _, _} = C2,
+    D1 = dict:fetch(<<"X-Riak-Last-Modified">>, M1),
+    D2 = dict:fetch(<<"X-Riak-Last-Modified">>, M2),
     %% true if C1 was modified later than C2
     Cmp1 = riak_core_util:compare_dates(D1, D2),
     %% true if C2 was modified later than C1
     Cmp2 = riak_core_util:compare_dates(D2, D1),
     %% check for deleted objects
-    Del1 = dict:is_key(<<"X-Riak-Deleted">>, C1#r_content.metadata),
-    Del2 = dict:is_key(<<"X-Riak-Deleted">>, C2#r_content.metadata),
+    Del1 = dict:is_key(<<"X-Riak-Deleted">>, M1),
+    Del2 = dict:is_key(<<"X-Riak-Deleted">>, M2),

     SameDate = (Cmp1 =:= Cmp2),
     case {SameDate, Del1, Del2} of
@@ -212,24 +225,26 @@ compare_content_dates(C1,C2) ->

 %% @spec merge(riak_object(), riak_object()) -> riak_object()
 %% @doc  Merge the contents and vclocks of OldObject and NewObject.
 %%       Note:  This function calls apply_updates on NewObject.
-merge(OldObject, NewObject) ->
-    NewObj1 = apply_updates(NewObject),
-    OldObject#r_object{contents=lists:umerge(lists:usort(NewObject#r_object.contents),
-                                             lists:usort(OldObject#r_object.contents)),
-                       vclock=vclock:merge([OldObject#r_object.vclock,
-                                            NewObj1#r_object.vclock]),
-                       updatemetadata=dict:store(clean, true, dict:new()),
-                       updatevalue=undefined}.
-
-%% @spec apply_updates(riak_object()) -> riak_object()
-%% @doc Promote pending updates (made with the update_value() and
-%%      update_metadata() calls) to this riak_object.
+%merge(OldObject, NewObject) ->
+%    NewObj1 = apply_updates(NewObject),
+%    OldObject#r_object{contents=lists:umerge(lists:usort(NewObject#r_object.contents),
+%                                             lists:usort(OldObject#r_object.contents)),
+%                       vclock=vclock:merge([OldObject#r_object.vclock,
+%                                            NewObj1#r_object.vclock]),
+%                       updatemetadata=dict:store(clean, true, dict:new()),
+%                       updatevalue=undefined}.
+
+% @spec apply_updates(riak_object()) -> riak_object()
+% @doc Promote pending updates (made with the update_value() and
+%      update_metadata() calls) to this riak_object.
apply_updates(Object=#r_object{}) -> VL = case Object#r_object.updatevalue of undefined -> - [C#r_content.value || C <- Object#r_object.contents]; + [{C#r_content.value, C#r_content.dvvclock} || C <- Object#r_object.contents]; _ -> - [Object#r_object.updatevalue] + Clocks = vclock(Object), + Clock = dottedvv:merge(Clocks), + [{Object#r_object.updatevalue, Clock}] end, MD = case dict:find(clean, Object#r_object.updatemetadata) of {ok,_} -> @@ -241,7 +256,7 @@ apply_updates(Object=#r_object{}) -> error -> [dict:erase(clean,Object#r_object.updatemetadata) || _X <- VL] end, - Contents = [#r_content{metadata=M,value=V} || {M,V} <- lists:zip(MD, VL)], + Contents = [#r_content{metadata=M,value=V,dvvclock=C} || {M,{V,C}} <- lists:zip(MD, VL)], Object#r_object{contents=Contents, updatemetadata=dict:store(clean, true, dict:new()), updatevalue=undefined}. @@ -254,9 +269,10 @@ bucket(#r_object{bucket=Bucket}) -> Bucket. %% @doc Return the key for this riak_object. key(#r_object{key=Key}) -> Key. -%% @spec vclock(riak_object()) -> vclock:vclock() -%% @doc Return the vector clock for this riak_object. -vclock(#r_object{vclock=VClock}) -> VClock. +%% @spec vclock(riak_object()) -> [dottedvv:dottedvv()] +%% @doc Return the dotted version vector(s) for this riak_object. +vclock(#r_object{contents=C}) -> [Content#r_content.dvvclock || Content <- C]. + %% @spec value_count(riak_object()) -> non_neg_integer() %% @doc Return the number of values (siblings) of this riak_object. @@ -266,7 +282,7 @@ value_count(#r_object{contents=Contents}) -> length(Contents). %% @doc Return the contents (a list of {metadata, value} tuples) for %% this riak_object. get_contents(#r_object{contents=Contents}) -> - [{Content#r_content.metadata, Content#r_content.value} || + [{Content#r_content.metadata, Content#r_content.value, Content#r_content.dvvclock} || Content <- Contents]. %% @spec get_metadata(riak_object()) -> dict() @@ -275,7 +291,7 @@ get_contents(#r_object{contents=Contents}) -> %% object has siblings (value_count() > 1). get_metadata(O=#r_object{}) -> % this blows up intentionally (badmatch) if more than one content value! - [{Metadata,_V}] = get_contents(O), + [{Metadata,_V,_C}] = get_contents(O), Metadata. %% @spec get_metadatas(riak_object()) -> [dict()] @@ -293,7 +309,7 @@ get_values(#r_object{contents=C}) -> [Content#r_content.value || Content <- C]. %% has siblings (value_count() > 1). get_value(Object=#r_object{}) -> % this blows up intentionally (badmatch) if more than one content value! - [{_M,Value}] = get_contents(Object), + [{_M,Value,_C}] = get_contents(Object), Value. %% @spec update_metadata(riak_object(), dict()) -> riak_object() @@ -315,17 +331,31 @@ get_update_value(#r_object{updatevalue=UV}) -> UV. %% @spec set_vclock(riak_object(), vclock:vclock()) -> riak_object() %% @doc INTERNAL USE ONLY. Set the vclock of riak_object O to V. -set_vclock(Object=#r_object{}, VClock) -> Object#r_object{vclock=VClock}. +set_vclock(Object=#r_object{contents=Contents}, Clock) -> + [C|_] = Contents, + C2 = C#r_content{dvvclock = Clock}, + Object#r_object{contents=[C2]}. + +%% @doc Increment the entry for Id in O's vclock (ignore timestamp since we are not pruning). +-spec increment_vclock(riak_object(), dottedvv:id(), dottedvv:timestamp()) -> riak_object(). +increment_vclock(Object, Id, _Timestamp) -> increment_vclock(Object, Id). + +%% @doc Increment the entry for Id in O's vclock. +-spec increment_vclock(riak_object(), dottedvv:id()) -> riak_object(). 
+increment_vclock(Object=#r_object{}, Id) ->
+    %Object#r_object{vclock=vclock:increment(ClientId, Object#r_object.vclock)}.
+    Dvv = dottedvv:increment(Id, vclock(Object)),
+    riak_object:set_vclock(Object, Dvv).
+
+-spec update_vclock(riak_object(), riak_object(), dottedvv:id()) -> riak_object().
+update_vclock(ObjectC=#r_object{}, ObjectR=#r_object{}, Id) ->
+    Dvv = dottedvv:update(vclock(ObjectC), vclock(ObjectR), Id),
+    riak_object:set_vclock(ObjectC,Dvv).

 %% @doc Increment the entry for ClientId in O's vclock.
--spec increment_vclock(riak_object(), vclock:vclock_node()) -> riak_object().
-increment_vclock(Object=#r_object{}, ClientId) ->
-    Object#r_object{vclock=vclock:increment(ClientId, Object#r_object.vclock)}.
-
-%% @doc Increment the entry for ClientId in O's vclock.
--spec increment_vclock(riak_object(), vclock:vclock_node(), vclock:timestamp()) -> riak_object().
-increment_vclock(Object=#r_object{}, ClientId, Timestamp) ->
-    Object#r_object{vclock=vclock:increment(ClientId, Timestamp, Object#r_object.vclock)}.
+%-spec increment_vclock(riak_object(), vclock:vclock_node(), vclock:timestamp()) -> riak_object().
+%increment_vclock(Object=#r_object{}, ClientId, Timestamp) ->
+%    Object#r_object{vclock=vclock:increment(ClientId, Timestamp, Object#r_object.vclock)}.

 %% @doc Prepare a list of index specifications
 %% to pass to the backend. This function is for
@@ -383,24 +413,25 @@ assemble_index_specs(Indexes, IndexOp) ->

 %% @spec set_contents(riak_object(), [{dict(), value()}]) -> riak_object()
 %% @doc INTERNAL USE ONLY.  Set the contents of riak_object to the
-%%      {Metadata, Value} pairs in MVs. Normal clients should use the
+%%      {Metadata, Value, Clock} triples in MVs. Normal clients should use the
 %%      set_update_[value|metadata]() + apply_updates() method for changing
 %%      object contents.
 set_contents(Object=#r_object{}, MVs) when is_list(MVs) ->
-    Object#r_object{contents=[#r_content{metadata=M,value=V} || {M, V} <- MVs]}.
+    Object#r_object{contents=[#r_content{metadata=M, value=V, dvvclock=C} || {M, V, C} <- MVs]}.

 %% @spec to_json(riak_object()) -> {struct, list(any())}
 %% @doc Converts a riak_object into its JSON equivalent
 to_json(Obj=#r_object{}) ->
-    {_,Vclock} = riak_kv_wm_utils:vclock_header(Obj),
+%    {_,Vclock} = riak_kv_wm_utils:vclock_header(Obj),
     {struct, [{<<"bucket">>, riak_object:bucket(Obj)},
               {<<"key">>, riak_object:key(Obj)},
-              {<<"vclock">>, list_to_binary(Vclock)},
+%              {<<"vclock">>, list_to_binary(Vclock)},
               {<<"values">>, [{struct,
                                [{<<"metadata">>, jsonify_metadata(MD)},
-                                {<<"data">>, V}]}
-                              || {MD, V} <- riak_object:get_contents(Obj)
+                                {<<"data">>, V},
+                                {<<"vclock">>, base64:encode(zlib:zip(term_to_binary(C)))}]}
+                              || {MD, V, C} <- riak_object:get_contents(Obj)
                              ]}]}.

 -spec from_json(any()) -> riak_object().
@@ -409,12 +440,12 @@ from_json({struct, Obj}) ->
 from_json(Obj) ->
     Bucket = proplists:get_value(<<"bucket">>, Obj),
     Key = proplists:get_value(<<"key">>, Obj),
-    VClock0 = proplists:get_value(<<"vclock">>, Obj),
-    VClock = binary_to_term(zlib:unzip(base64:decode(VClock0))),
+%    VClock0 = proplists:get_value(<<"vclock">>, Obj),
+%    VClock = binary_to_term(zlib:unzip(base64:decode(VClock0))),
     [{struct, Values}] = proplists:get_value(<<"values">>, Obj),
     RObj0 = riak_object:new(Bucket, Key, <<"">>),
-    RObj1 = riak_object:set_vclock(RObj0, VClock),
-    riak_object:set_contents(RObj1, dejsonify_values(Values, [])).
+%    RObj1 = riak_object:set_vclock(RObj0, VClock),
+    riak_object:set_contents(RObj0, dejsonify_values(Values, [])).
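With the clock moved into each r_content, to_json/1 above now emits one "vclock" field per sibling instead of a single object-level "vclock". Illustratively, a two-sibling object serializes roughly like this (shape only; metadata and the encoded clock strings are elided):

    {"bucket": "B", "key": "K",
     "values": [{"metadata": {...}, "data": "v1", "vclock": "<base64(zlib(term))>"},
                {"metadata": {...}, "data": "v2", "vclock": "<base64(zlib(term))>"}]}

from_json/1 correspondingly no longer looks for a top-level "vclock" and leaves each value's clock to dejsonify_values/2.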
jsonify_metadata(MD) ->
     MDJS = fun({LastMod, Now={_,_,_}}) ->
@@ -473,7 +504,7 @@ jsonify_proplist(List) ->
 dejsonify_values([], Accum) ->
     lists:reverse(Accum);
 dejsonify_values([{<<"metadata">>, {struct, MD0}},
-                  {<<"data">>, D}|T], Accum) ->
+                  {<<"data">>, D}, {<<"vclock">>, C} | T], Accum) ->
     Converter = fun({Key, Val}) ->
                         case Key of
                             <<"Links">> ->
@@ -490,7 +521,7 @@ dejsonify_values([{<<"metadata">>, {struct, MD0}},
                         end
                 end,
     MD = dict:from_list([Converter(KV) || KV <- MD0]),
-    dejsonify_values(T, [{MD, D}|Accum]).
+    dejsonify_values(T, [{MD, D, C}|Accum]).

 %% @doc convert structs back into proplists
 dejsonify_meta_value({struct, PList}) ->
@@ -528,15 +559,8 @@ syntactic_merge(CurrentObject, NewObject) ->
                       true -> apply_updates(CurrentObject);
                       false -> CurrentObject
                   end,
-
-    case ancestors([UpdatedCurr, UpdatedNew]) of
-        [] -> merge(UpdatedCurr, UpdatedNew);
-        [Ancestor] ->
-            case equal(Ancestor, UpdatedCurr) of
-                true -> UpdatedNew;
-                false -> UpdatedCurr
-            end
-    end.
+
+    reconcile([UpdatedNew, UpdatedCurr], true).

 -ifdef(TEST).

From cf0be545300573d48c1a3fced5501bc003032f1a Mon Sep 17 00:00:00 2001
From: ricardobcl
Date: Fri, 13 Apr 2012 00:08:14 +0100
Subject: [PATCH 02/25] Corrected a few errors; all unit tests now pass.

---
 src/riak_index.erl  |   2 +-
 src/riak_object.erl | 126 ++++++++++++++++----------------------------
 2 files changed, 46 insertions(+), 82 deletions(-)

diff --git a/src/riak_index.erl b/src/riak_index.erl
index 2645d3ae41..242184101f 100644
--- a/src/riak_index.erl
+++ b/src/riak_index.erl
@@ -368,7 +368,7 @@ parse_object_hook_test() ->
         end,

     ?assertMatch(
-       {r_object, _, _, _, _, _, _},
+       {r_object, _, _, _, _, _},
        F([
           {<<"field_bin">>, <<"A">>},
          {<<"field_int">>, <<"1">>}

diff --git a/src/riak_object.erl b/src/riak_object.erl
index 8cbe62dd56..5df89357d7 100644
--- a/src/riak_object.erl
+++ b/src/riak_object.erl
@@ -46,7 +46,6 @@
           bucket :: bucket(),
           key :: key(),
           contents :: [#r_content{}],
-%          vclock = vclock:fresh() :: vclock:vclock(),
           updatemetadata=dict:store(clean, true, dict:new()) :: dict(),
           updatevalue :: term()
          }).
@@ -128,8 +127,8 @@ equal_contents([C1|R1],[C2|R2]) ->
     MD2 = lists:keysort(1, dict:to_list(C2#r_content.metadata)),
     (MD1 =:= MD2)
     andalso (C1#r_content.value =:= C2#r_content.value)
-    andalso equal_contents(R1,R2)
-    andalso dottedvv:equal(C1#r_content.dvvclock,C2#r_content.dvvclock).
+    andalso dottedvv:equal(C1#r_content.dvvclock,C2#r_content.dvvclock)
+    andalso equal_contents(R1,R2).



@@ -141,72 +140,59 @@ equal_contents([C1|R1],[C2|R2]) ->
 % @spec reconcile([riak_object()], boolean()) -> riak_object()
 % @doc Reconcile a list of riak objects.  If AllowMultiple is true,
 %      the riak_object returned may contain multiple values if Objects
 %      contains sibling versions (objects that could not be syntactically
 %      merged). If AllowMultiple is false, the riak_object returned will
 %      contain the value of the most-recently-updated object, as per the
 %      X-Riak-Last-Modified header.
reconcile(Objects, AllowMultiple) -> - AllClocks = lists:flatten([vclock(O) || O <- Objects]), - AllContents = lists:flatten([ get_contents(O) || O <- Objects]), - NewContents = case AllowMultiple of + RObjs = reconcile(Objects), + AllContents = lists:flatten([O#r_object.contents || O <- RObjs]), + Contents = case AllowMultiple of false -> - {M, V, _} = most_recent_content(AllContents), - C = dottedvv:merge(AllClocks), - [#r_content{metadata=M, value=V, dvvclock=C}]; + Cont = most_recent_content(AllContents), + case length(AllContents) of + 1 -> [Cont]; + _ -> AllClocks = lists:flatten([vclock(O) || O <- RObjs]), + M = Cont#r_content.metadata, + V = Cont#r_content.value, + C = dottedvv:merge(AllClocks), + [#r_content{metadata=M, value=V, dvvclock=C}] + end; true -> - Sync = dottedvv:sync(AllClocks), - Conts = - [[#r_content{metadata=M, value=V, dvvclock=C} || {M, V, C} <- AllContents, - (dottedvv:equal(C, Sclock) orelse dottedvv:descends(C, Sclock))] - || Sclock <- Sync], - lists:usort(lists:flatten(Conts)) - end, - HdObj = hd(Objects), - HdObj#r_object{contents=NewContents, + lists:usort(AllContents) + end, + HdObj = hd(RObjs), + HdObj#r_object{contents=Contents, updatemetadata=dict:store(clean, true, dict:new()), updatevalue=undefined}. -%% @spec ancestors([riak_object()]) -> [riak_object()] -%% @doc Given a list of riak_object()s, return the objects that are pure -%% ancestors of other objects in the list, if any. The changes in the -%% objects returned by this function are guaranteed to be reflected in -%% the other objects in Objects, and can safely be discarded from the list -%% without losing data. -%ancestors(pure_baloney_to_fool_dialyzer) -> -% [#r_object{vclock = vclock:fresh()}]; -%ancestors(Objects) -> -% ToRemove = [[O2 || O2 <- Objects, -% vclock:descends(O1#r_object.vclock,O2#r_object.vclock), -% (vclock:descends(O2#r_object.vclock,O1#r_object.vclock) == false)] -% || O1 <- Objects], -% lists:flatten(ToRemove). -% -%%% @spec reconcile([riak_object()]) -> [riak_object()] -%reconcile(Objects) -> -% All = sets:from_list(Objects), -% Del = sets:from_list(ancestors(Objects)), -% remove_duplicate_objects(sets:to_list(sets:subtract(All, Del))). -% -%remove_duplicate_objects(Os) -> rem_dup_objs(Os,[]). -%rem_dup_objs([],Acc) -> Acc; -%rem_dup_objs([O|Rest],Acc) -> -% EqO = [AO || AO <- Acc, riak_object:equal(AO,O) =:= true], -% case EqO of -% [] -> rem_dup_objs(Rest,[O|Acc]); -% _ -> rem_dup_objs(Rest,Acc) -% end. +%% @spec reconcile([riak_object()]) -> [riak_object()] +reconcile(Objects) -> + AllClocks = lists:flatten([vclock(O) || O <- Objects]), + SyncClocks = dottedvv:sync(AllClocks), + Objs = + [[Obj || Obj <- Objects, (dottedvv:equal(vclock(Obj), C) orelse dottedvv:descends(vclock(Obj), C))] + || C <- SyncClocks], + remove_duplicate_objects(lists:flatten(Objs)). + +remove_duplicate_objects(Os) -> rem_dup_objs(Os,[]). +rem_dup_objs([],Acc) -> Acc; +rem_dup_objs([O|Rest],Acc) -> + EqO = [AO || AO <- Acc, riak_object:equal(AO,O) =:= true], + case EqO of + [] -> rem_dup_objs(Rest,[O|Acc]); + _ -> rem_dup_objs(Rest,Acc) + end. most_recent_content(AllContents) -> hd(lists:sort(fun compare_content_dates/2, AllContents)). 
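reconcile/1 above keeps an object only if its clock descends one of the synced clocks, then deduplicates the survivors; obsolete versions simply fail the descends test. For intuition, a plain version-vector approximation of the dominance test (assumed behavior only — the real dottedvv:descends/2 also accounts for the dot) looks like:

    %% Sketch: A descends B iff A has seen at least as many events from
    %% every actor that B has seen. Orddict of Actor => Counter assumed.
    descends(_A, []) ->
        true;   % every clock descends the empty clock
    descends(A, B) ->
        lists:all(fun({Actor, CntB}) ->
                      case orddict:find(Actor, A) of
                          {ok, CntA} -> CntA >= CntB;
                          error      -> false
                      end
                  end, B).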
compare_content_dates(C1,C2) ->
-    {M1, _, _} = C1,
-    {M2, _, _} = C2,
-    D1 = dict:fetch(<<"X-Riak-Last-Modified">>, M1),
-    D2 = dict:fetch(<<"X-Riak-Last-Modified">>, M2),
+    D1 = dict:fetch(<<"X-Riak-Last-Modified">>, C1#r_content.metadata),
+    D2 = dict:fetch(<<"X-Riak-Last-Modified">>, C2#r_content.metadata),
     %% true if C1 was modified later than C2
     Cmp1 = riak_core_util:compare_dates(D1, D2),
     %% true if C2 was modified later than C1
     Cmp2 = riak_core_util:compare_dates(D2, D1),
     %% check for deleted objects
-    Del1 = dict:is_key(<<"X-Riak-Deleted">>, M1),
-    Del2 = dict:is_key(<<"X-Riak-Deleted">>, M2),
+    Del1 = dict:is_key(<<"X-Riak-Deleted">>, C1#r_content.metadata),
+    Del2 = dict:is_key(<<"X-Riak-Deleted">>, C2#r_content.metadata),

     SameDate = (Cmp1 =:= Cmp2),
     case {SameDate, Del1, Del2} of
@@ -222,18 +208,6 @@ compare_content_dates(C1,C2) ->
             C1 < C2
     end.

-%% @spec merge(riak_object(), riak_object()) -> riak_object()
-%% @doc  Merge the contents and vclocks of OldObject and NewObject.
-%%       Note:  This function calls apply_updates on NewObject.
-%merge(OldObject, NewObject) ->
-%    NewObj1 = apply_updates(NewObject),
-%    OldObject#r_object{contents=lists:umerge(lists:usort(NewObject#r_object.contents),
-%                                             lists:usort(OldObject#r_object.contents)),
-%                       vclock=vclock:merge([OldObject#r_object.vclock,
-%                                            NewObj1#r_object.vclock]),
-%                       updatemetadata=dict:store(clean, true, dict:new()),
-%                       updatevalue=undefined}.
-
 % @spec apply_updates(riak_object()) -> riak_object()
 % @doc Promote pending updates (made with the update_value() and
 %      update_metadata() calls) to this riak_object.
@@ -343,7 +317,6 @@ increment_vclock(Object, Id, _Timestamp) -> increment_vclock(Object, Id).
 %% @doc Increment the entry for Id in O's vclock.
 -spec increment_vclock(riak_object(), dottedvv:id()) -> riak_object().
 increment_vclock(Object=#r_object{}, Id) ->
-    %Object#r_object{vclock=vclock:increment(ClientId, Object#r_object.vclock)}.
     Dvv = dottedvv:increment(Id, vclock(Object)),
     riak_object:set_vclock(Object, Dvv).
@@ -422,15 +395,13 @@ set_contents(Object=#r_object{}, MVs) when is_list(MVs) ->
 %% @spec to_json(riak_object()) -> {struct, list(any())}
 %% @doc Converts a riak_object into its JSON equivalent
 to_json(Obj=#r_object{}) ->
-%    {_,Vclock} = riak_kv_wm_utils:vclock_header(Obj),
     {struct, [{<<"bucket">>, riak_object:bucket(Obj)},
               {<<"key">>, riak_object:key(Obj)},
               {<<"values">>, [{struct,
                                [{<<"metadata">>, jsonify_metadata(MD)},
                                 {<<"data">>, V},
-                                {<<"vclock">>, base64:encode(zlib:zip(term_to_binary(C)))}]}
+                                {<<"vclock">>, riak_kv_wm_utils:encode_vclock(C)}]}
                               || {MD, V, C} <- riak_object:get_contents(Obj)
                              ]}]}.
@@ -440,11 +411,8 @@ from_json({struct, Obj}) ->
 from_json(Obj) ->
     Bucket = proplists:get_value(<<"bucket">>, Obj),
     Key = proplists:get_value(<<"key">>, Obj),
-%    VClock0 = proplists:get_value(<<"vclock">>, Obj),
-%    VClock = binary_to_term(zlib:unzip(base64:decode(VClock0))),
     [{struct, Values}] = proplists:get_value(<<"values">>, Obj),
     RObj0 = riak_object:new(Bucket, Key, <<"">>),
-%    RObj1 = riak_object:set_vclock(RObj0, VClock),
     riak_object:set_contents(RObj0, dejsonify_values(Values, [])).

 jsonify_metadata(MD) ->
@@ -521,7 +489,8 @@ dejsonify_values([{<<"metadata">>, {struct, MD0}},
                         end
                 end,
     MD = dict:from_list([Converter(KV) || KV <- MD0]),
-    dejsonify_values(T, [{MD, D, C}|Accum]).
+    Clock = binary_to_term(zlib:unzip(base64:decode(C))),
+    dejsonify_values(T, [{MD, D, Clock}|Accum]).
%% @doc convert structs back into proplists dejsonify_meta_value({struct, PList}) -> @@ -585,14 +554,9 @@ update_test() -> V2 = riak_object:get_value(O2), {O,O2}. -ancestor_test() -> +reconcile_test() -> {O,O2} = update_test(), O3 = riak_object:increment_vclock(O2,self()), - [O] = riak_object:ancestors([O,O3]), - {O,O3}. - -reconcile_test() -> - {O,O3} = ancestor_test(), O3 = riak_object:reconcile([O,O3],true), O3 = riak_object:reconcile([O,O3],false), {O,O3}. @@ -609,7 +573,7 @@ merge2_test() -> O1 = riak_object:increment_vclock(object_test(), node1), O2 = riak_object:increment_vclock(riak_object:new(B,K,V), node2), O3 = riak_object:syntactic_merge(O1, O2), - [node1, node2] = [N || {N,_} <- riak_object:vclock(O3)], + [node1, node2] = lists:sort([N || {_,{N,_}} <- riak_object:vclock(O3)]), 2 = riak_object:value_count(O3). merge3_test() -> @@ -792,7 +756,7 @@ jsonify_round_trip_test() -> O2 = from_json(to_json(O)), ?assertEqual(bucket(O), bucket(O2)), ?assertEqual(key(O), key(O2)), - ?assert(vclock:equal(vclock(O), vclock(O2))), + ?assert(dottedvv:equal(vclock(O), vclock(O2))), ?assertEqual(lists:sort(Meta), lists:sort(dict:fetch(?MD_USERMETA, get_metadata(O2)))), ?assertEqual(Links, dict:fetch(?MD_LINKS, get_metadata(O2))), ?assertEqual(lists:sort(Indexes), lists:sort(index_data(O2))), From 5b7040bf7b94a526ee8ff6f4b8ef2427bc2b1bcd Mon Sep 17 00:00:00 2001 From: ricardobcl Date: Mon, 21 May 2012 18:51:02 +0100 Subject: [PATCH 03/25] In reconcile/1, descends already tests if params are equal. --- src/riak_object.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/riak_object.erl b/src/riak_object.erl index 5df89357d7..cef7a75fdd 100644 --- a/src/riak_object.erl +++ b/src/riak_object.erl @@ -167,7 +167,7 @@ reconcile(Objects) -> AllClocks = lists:flatten([vclock(O) || O <- Objects]), SyncClocks = dottedvv:sync(AllClocks), Objs = - [[Obj || Obj <- Objects, (dottedvv:equal(vclock(Obj), C) orelse dottedvv:descends(vclock(Obj), C))] + [[Obj || Obj <- Objects, dottedvv:descends(vclock(Obj), C)] || C <- SyncClocks], remove_duplicate_objects(lists:flatten(Objs)). 
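The PATCH 03 simplification above relies on equality being a special case of dominance: equal clocks descend each other, so the dottedvv:equal/2 test added no information. In terms of the plain-VV descends/2 sketch earlier, this is just reflexivity — every counter is trivially >= itself:

    %% Reflexivity check matching the descends/2 sketch above; this is
    %% what makes the separate equality test redundant.
    descends_is_reflexive_test() ->
        C = [{a, 2}, {b, 5}],
        true = descends(C, C).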
From 19666c4183b45560f3159e069b319c5de31d841e Mon Sep 17 00:00:00 2001 From: ricardobcl Date: Wed, 23 May 2012 20:14:12 +0100 Subject: [PATCH 04/25] More efficient reconcile/1, doing pair-wise syncs --- src/riak_object.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/riak_object.erl b/src/riak_object.erl index cef7a75fdd..4f9d0117a4 100644 --- a/src/riak_object.erl +++ b/src/riak_object.erl @@ -164,8 +164,8 @@ reconcile(Objects, AllowMultiple) -> %% @spec reconcile([riak_object()]) -> [riak_object()] reconcile(Objects) -> - AllClocks = lists:flatten([vclock(O) || O <- Objects]), - SyncClocks = dottedvv:sync(AllClocks), + AllClocks = [vclock(O) || O <- Objects], + SyncClocks = lists:foldl(fun(X,Y) -> dottedvv:sync(X,Y) end, dottedvv:fresh(), AllClocks), Objs = [[Obj || Obj <- Objects, dottedvv:descends(vclock(Obj), C)] || C <- SyncClocks], From 554ead868487d842c467022c29a75362a8d9fa5d Mon Sep 17 00:00:00 2001 From: ricardobcl Date: Sat, 24 Nov 2012 21:56:10 +0000 Subject: [PATCH 05/25] Merge with branch compactDVV --- .travis.yml | 12 + README.org | 3 + include/riak_kv_dtrace.hrl | 48 ++ include/riak_kv_mrc_sink.hrl | 20 + include/riak_kv_vnode.hrl | 18 +- rebar | Bin 108939 -> 119212 bytes rebar.config | 8 +- src/lk.erl | 46 -- src/riak.erl | 13 +- src/riak_client.erl | 357 ++-------- src/riak_index.erl | 42 +- src/riak_kv.app.src | 11 +- src/riak_kv_app.erl | 80 ++- src/riak_kv_bitcask_backend.erl | 62 +- src/riak_kv_bucket.erl | 3 +- src/riak_kv_buckets_fsm.erl | 53 +- src/riak_kv_console.erl | 125 +++- src/riak_kv_delete.erl | 112 +-- src/riak_kv_eleveldb_backend.erl | 233 +++++-- src/riak_kv_encoding_migrate.erl | 20 +- src/riak_kv_fsm_timing.erl | 81 +++ src/riak_kv_get_core.erl | 38 +- src/riak_kv_get_fsm.erl | 234 ++++--- src/riak_kv_index_fsm.erl | 89 ++- src/riak_kv_js_manager.erl | 5 - src/riak_kv_keylister_legacy.erl | 125 ---- src/riak_kv_keylister_legacy_sup.erl | 48 -- src/riak_kv_keylister_master.erl | 80 --- src/riak_kv_keys_fsm.erl | 79 +-- src/riak_kv_keys_fsm_legacy.erl | 284 -------- src/riak_kv_keys_fsm_legacy_sup.erl | 49 -- src/riak_kv_legacy_vnode.erl | 11 +- src/riak_kv_lru.erl | 352 ---------- src/riak_kv_map_master.erl | 262 ------- src/riak_kv_map_phase.erl | 301 -------- src/riak_kv_mapper.erl | 311 --------- src/riak_kv_mapper_sup.erl | 48 -- src/riak_kv_mapred_cache.erl | 80 --- src/riak_kv_mapred_planner.erl | 75 -- src/riak_kv_mapred_query.erl | 214 ------ src/riak_kv_mapred_term.erl | 2 +- src/riak_kv_mapreduce.erl | 8 +- src/riak_kv_memory_backend.erl | 352 +++++++--- src/riak_kv_mrc_pipe.erl | 332 +++++++-- src/riak_kv_mrc_sink.erl | 434 ++++++++++++ src/riak_kv_mrc_sink_sup.erl | 83 +++ src/riak_kv_multi_backend.erl | 16 +- src/riak_kv_pb_bucket.erl | 131 ++++ src/riak_kv_pb_index.erl | 102 +++ src/riak_kv_pb_listener.erl | 62 -- src/riak_kv_pb_mapred.erl | 222 ++++++ src/riak_kv_pb_object.erl | 303 ++++++++ src/riak_kv_pb_socket.erl | 646 ----------------- src/riak_kv_pb_socket_sup.erl | 44 -- src/riak_kv_phase_proto.erl | 35 - src/riak_kv_pipe_get.erl | 82 ++- src/riak_kv_pipe_index.erl | 45 +- src/riak_kv_pipe_listkeys.erl | 33 +- src/riak_kv_put_core.erl | 12 +- src/riak_kv_put_fsm.erl | 158 +++-- src/riak_kv_reduce_phase.erl | 112 --- src/riak_kv_stat.erl | 990 ++++++++------------------- src/riak_kv_stat_bc.erl | 402 +++++++++++ src/riak_kv_status.erl | 7 +- src/riak_kv_sup.erl | 53 +- src/riak_kv_test_util.erl | 190 ++++- src/riak_kv_util.erl | 26 +- src/riak_kv_vnode.erl | 256 ++++--- src/riak_kv_wm_buckets.erl | 3 + 
src/riak_kv_wm_index.erl | 40 +- src/riak_kv_wm_keylist.erl | 3 + src/riak_kv_wm_link_walker.erl | 31 +- src/riak_kv_wm_mapred.erl | 352 ++++------ src/riak_kv_wm_object.erl | 68 +- src/riak_kv_wm_props.erl | 29 +- src/riak_kv_wm_stats.erl | 14 +- src/riak_kv_wm_utils.erl | 49 +- src/riak_kv_yessir_backend.erl | 350 ++++++++++ test/backend_eqc.erl | 252 ++++++- test/fsm_eqc_util.erl | 4 + test/get_fsm_qc.erl | 44 +- test/keys_fsm_eqc.erl | 199 ++---- test/mapred_test.erl | 467 ++++++++----- test/put_fsm_eqc.erl | 2 +- 84 files changed, 5356 insertions(+), 5721 deletions(-) create mode 100644 .travis.yml create mode 100644 include/riak_kv_dtrace.hrl create mode 100644 include/riak_kv_mrc_sink.hrl delete mode 100644 src/lk.erl create mode 100644 src/riak_kv_fsm_timing.erl delete mode 100644 src/riak_kv_keylister_legacy.erl delete mode 100644 src/riak_kv_keylister_legacy_sup.erl delete mode 100644 src/riak_kv_keylister_master.erl delete mode 100644 src/riak_kv_keys_fsm_legacy.erl delete mode 100644 src/riak_kv_keys_fsm_legacy_sup.erl delete mode 100644 src/riak_kv_lru.erl delete mode 100644 src/riak_kv_map_master.erl delete mode 100644 src/riak_kv_map_phase.erl delete mode 100644 src/riak_kv_mapper.erl delete mode 100644 src/riak_kv_mapper_sup.erl delete mode 100644 src/riak_kv_mapred_cache.erl delete mode 100644 src/riak_kv_mapred_planner.erl delete mode 100644 src/riak_kv_mapred_query.erl create mode 100644 src/riak_kv_mrc_sink.erl create mode 100644 src/riak_kv_mrc_sink_sup.erl create mode 100644 src/riak_kv_pb_bucket.erl create mode 100644 src/riak_kv_pb_index.erl delete mode 100644 src/riak_kv_pb_listener.erl create mode 100644 src/riak_kv_pb_mapred.erl create mode 100644 src/riak_kv_pb_object.erl delete mode 100644 src/riak_kv_pb_socket.erl delete mode 100644 src/riak_kv_pb_socket_sup.erl delete mode 100644 src/riak_kv_phase_proto.erl delete mode 100644 src/riak_kv_reduce_phase.erl create mode 100644 src/riak_kv_stat_bc.erl create mode 100644 src/riak_kv_yessir_backend.erl diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 0000000000..b636a6c288 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,12 @@ +language: erlang +script: (rebar compile && rebar eunit skip_deps=true) || (find . -name "*.log" -print -exec cat \{\} \; && sh -c "exit 1") +notifications: + webhooks: http://basho-engbot.herokuapp.com/travis?key=ad9a6e51d706903e1fd0963c7b8e064b93e85b56 + email: eng@basho.com +before_script: + - "ulimit -n 4096" +otp_release: + - R15B01 + - R15B + - R14B04 + - R14B03 diff --git a/README.org b/README.org index b614655f18..04e76b9f5d 100644 --- a/README.org +++ b/README.org @@ -1,5 +1,8 @@ * riak_kv ** Overview + +[[http://travis-ci.org/basho/riak_kv][Travis-CI]] :: [[https://secure.travis-ci.org/basho/riak_kv.png]] + Riak KV is an open source Erlang application that is distributed using the [[https://github.com/basho/riak_core][riak_core]] Erlang library. Riak KV provides a key/value datastore and features MapReduce, lightweight data relations, and several different client APIs. diff --git a/include/riak_kv_dtrace.hrl b/include/riak_kv_dtrace.hrl new file mode 100644 index 0000000000..12c8632ced --- /dev/null +++ b/include/riak_kv_dtrace.hrl @@ -0,0 +1,48 @@ +-include_lib("riak_core/include/riak_core_dtrace.hrl"). + +%% Main wrapper macro for DTrace/SystemTap probe annotations +%% NOTE: We assume there will be per-module dtrace_int() and dtrace() funcs! + +-define(DTRACE(Category, Ints, Strings), + dtrace_int(Category, Ints, Strings)). 
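The ?DTRACE macro above expands to a call of dtrace_int/3 in whatever module uses it, per the NOTE, so each module can decide how (or whether) to forward probes. A hypothetical shim and call site, using one of the probe categories defined just below (this example is ours, not part of the patch; a real wrapper would dispatch to the VM's dynamic-tracing support rather than logging):

    -module(dtrace_example).
    -export([dtrace_int/3, demo/0]).
    -include("riak_kv_dtrace.hrl").

    %% Stand-in for the per-module wrapper the header requires.
    dtrace_int(Category, Ints, Strings) ->
        error_logger:info_msg("probe ~p ints=~p strings=~p~n",
                              [Category, Ints, Strings]).

    demo() ->
        ?DTRACE(?C_GET_FSM_INIT, [], ["init"]).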
+ +%% Probe categories +-define(C_GET_FSM_INIT, 500). +-define(C_GET_FSM_PREPARE, 501). +-define(C_GET_FSM_VALIDATE, 502). +-define(C_GET_FSM_EXECUTE, 503). +-define(C_GET_FSM_PREFLIST, 504). +-define(C_GET_FSM_WAITING_R, 505). +-define(C_GET_FSM_WAITING_R_TIMEOUT, 506). +-define(C_GET_FSM_CLIENT_REPLY, 507). +-define(C_GET_FSM_FINALIZE, 508). +-define(C_GET_FSM_MAYBE_DELETE, 509). +-define(C_GET_FSM_RR, 510). +-define(C_GET_FSM_WAITING_RR, 511). +-define(C_GET_FSM_WAITING_RR_TIMEOUT, 512). + +-define(C_PUT_FSM_INIT, 520). +-define(C_PUT_FSM_PREPARE, 521). +-define(C_PUT_FSM_VALIDATE, 522). +-define(C_PUT_FSM_PRECOMMIT, 523). +-define(C_PUT_FSM_EXECUTE_LOCAL, 524). +-define(C_PUT_FSM_WAITING_LOCAL_VNODE, 525). +-define(C_PUT_FSM_EXECUTE_REMOTE, 526). +-define(C_PUT_FSM_WAITING_REMOTE_VNODE, 527). +-define(C_PUT_FSM_PROCESS_REPLY, 528). +-define(C_PUT_FSM_POSTCOMMIT, 529). +-define(C_PUT_FSM_FINISH, 530). +-define(C_PUT_FSM_DECODE_PRECOMMIT, 531). % errors only +-define(C_PUT_FSM_DECODE_POSTCOMMIT, 532). % errors only + +-define(C_DELETE_INIT1, 535). +-define(C_DELETE_INIT2, 536). +-define(C_DELETE_REAPER_GET_DONE, 537). + +-define(C_BUCKETS_INIT, 540). +-define(C_BUCKETS_PROCESS_RESULTS, 541). +-define(C_BUCKETS_FINISH, 542). + +-define(C_KEYS_INIT, 545). +-define(C_KEYS_PROCESS_RESULTS, 546). +-define(C_KEYS_FINISH, 547). diff --git a/include/riak_kv_mrc_sink.hrl b/include/riak_kv_mrc_sink.hrl new file mode 100644 index 0000000000..49f5ee2e0c --- /dev/null +++ b/include/riak_kv_mrc_sink.hrl @@ -0,0 +1,20 @@ +%% used to communicate from riak_kv_mrc_sink to riak_kv_wm_mapred and +%% riak_kv_pb_mapred +-record(kv_mrc_sink, + { + ref :: reference(), % the pipe ref + results :: [{PhaseId::integer(), Result::term()}], + logs :: [{PhaseId::integer(), Message::term()}], + done :: boolean() + }). + +%% used by riak_kv_mrc_sink:mapred_stream_sink +-record(mrc_ctx, + { + ref :: reference(), % the pipe ref (so we don't have to dig) + pipe :: riak_pipe:pipe(), + sink :: {pid(), reference()}, % sink and monitor + sender :: {pid(), reference()}, % async sender and monitor + timer :: {reference(), reference()}, % timeout timer and pipe ref + keeps :: integer() + }). diff --git a/include/riak_kv_vnode.hrl b/include/riak_kv_vnode.hrl index 894da6a8c8..080e0a0a8e 100644 --- a/include/riak_kv_vnode.hrl +++ b/include/riak_kv_vnode.hrl @@ -11,15 +11,6 @@ bkey :: {binary(), binary()}, req_id :: non_neg_integer()}). --record(riak_kv_mget_req_v1, { - bkeys :: list({binary(), binary()}), - req_id :: non_neg_integer(), - from :: term()}). - --record(riak_kv_listkeys_req_v1, { - bucket :: binary(), - req_id :: non_neg_integer()}). - -record(riak_kv_listkeys_req_v2, { bucket :: binary()|'_'|tuple(), req_id :: non_neg_integer(), @@ -42,6 +33,12 @@ item_filter :: function(), qry :: riak_index:query_def()}). +%% same as _v1, but triggers ack-based backpressure +-record(riak_kv_index_req_v2, { + bucket :: binary() | tuple(), + item_filter :: function(), + qry :: riak_index:query_def()}). + -record(riak_kv_vnode_status_req_v1, {}). -record(riak_kv_delete_req_v1, { @@ -60,10 +57,9 @@ -define(KV_PUT_REQ, #riak_kv_put_req_v1). -define(KV_GET_REQ, #riak_kv_get_req_v1). --define(KV_MGET_REQ, #riak_kv_mget_req_v1). -define(KV_LISTBUCKETS_REQ, #riak_kv_listbuckets_req_v1). -define(KV_LISTKEYS_REQ, #riak_kv_listkeys_req_v4). --define(KV_INDEX_REQ, #riak_kv_index_req_v1). +-define(KV_INDEX_REQ, #riak_kv_index_req_v2). -define(KV_VNODE_STATUS_REQ, #riak_kv_vnode_status_req_v1). -define(KV_DELETE_REQ, #riak_kv_delete_req_v1). 
-define(KV_MAP_REQ, #riak_kv_map_req_v1).
diff --git a/rebar b/rebar
index 7c50a64dec23b450888d81e39c7d40b8ec54e815..7bdb274306991818770baacbec3a76beff5befe6 100755
GIT binary patch
delta 110921
[base85-encoded binary payload omitted: this delta updates the bundled rebar escript, 108939 -> 119212 bytes per the diffstat above]
zUz~i+46T4w!Qdutw#JG(?Ky7KOl)dfqeRyluq;!#1uHLRna_?_!?kIeuUo#V?L&C+ z>RPuxR^S7EH&qX*W(b9X`!Kh)I2S z_3PQ$MD=GWK6ekrpR@eBh0*4{6bm*-RGWO)AW9GY?4167`@*v~a?fH;UErE7Y0_ht z`DA6?C)%^}iYPR7bosVmroAa-4z)E z*OGJPgme&;uaC9)F9WeCZvZvpP0iud|gL;F^?QqD(3QQx(ox-{0}lnay)avHVjZtP=;xn zRaDn9^bwtG=F;qv;FL^g+ITd}jI~7iXr_KGoxd*7v(7JvM=gTJhXCWH>=qcepMW)8u4LGGDuk+U+d6ypXpm;# z|ACy`WX*CMvuWAvIrgr0Qa9;Cnccm)W7h5I~~nsoiK` z&bO0S1e&W-KIl|@O#uKncPR|Xf27WK`FSc=;r4U(3{;2D$K3|LR49Jq{Y!@_Oi8{^ znqW}vKSOa2&?r5iZpx{2H@M1vcDY5&58<%y{Z{j!v!4f_?Nq8-f}JMw@wHav%#mR2 zTrq>sCAFGdF%TBKgtx>0LXK=952-AE@wdrae~L%f5vhM9qXF=hxCbvng!Y-;-CI;L zqL-4M{W+VW5SYBICOP$&F=V|eg!kQ+?SK8WmVR@#60GZ_r#V~=d>kYS`rMin*N{!Aq~Z6u@U#VT@p7?^#)u>`%zP zapnc*ec<|Y%qt`}5U52de>#p3UAu#}_>vxU3pMnk73BIRX@sjJ2=a)&_&3Z%bAq9& z1)yYYd6gWVz>da>z_G|`CYzE-vBF0Y!qG#szzwTfoF32M$2x4Rkb0ITH&h%QM;l5{ z=rXhh@ox>HMi<>7dc)YUrN3)&+PNd$I)W*pk)1GgJSeJ^X2cL8a7mV#`Y-YcC%bRT z6gj%E1rFPhvBHQJa>nlAB@&w}Te>@<9Iy!%;Y6m2ptWGaNVzyE0x|HF#EsN~TX8* z2$X1jsRt+`;g4A7pT$y3h?_mwoqS|aFmYb~-xNXHXFei55@v*F;(554b1yY0BwEIn zhDrU+rU`dM!sH-6_%yLPRs<|E1(M^vSWFSJ?7l3^h0+116cxd;#KY)Pya^eQpa>0n zsCS5-VKGXE^W%_40qbF`oIt*(EN%{|wmE6yJ}Gy0#vt_?3lsvIFo^)^$QpGc8V!r- z^?$*Q!EXe&p5olkX@D-0RV57+Uf&{k?~LDyY1}Yeb6@%A*2f(vZ``nx^%HlVI|toJ z&QzbOy;Is2({YsKSw-@UmkQ0of~v?O(W>!UiJ)8i;8(!DQC{0Qdc?C&1dP&e(9N0{IVH4^uz$QEq%ApP zD25?Le}_lXgLvzHoJ+Gwz2@C^@3MmOv`ZNc*p)mL1@%s$>N0^I70LDeXWc<@l6GLY z*N&61QG{?Vsj#?3hymK-mD=J-?HZ_`EW8)2{FT=Edc$Yz3U2hp=4LZbbaER@_cwHX8d-`)8=H;EeUyiL#|r1#GHKg_t?k9zHRO zB;(4ki=#?2R?;E27_VvF&dQ5R>M4x__`n~)4g0rUNNC6qC?CLIetV&6C=_7cYeN!% z8fXZR^8Ym9v7F!l-ZLxW=_swy;l{aeB?Y>vaPVRcl3=j5V^45*f+U>4ZB-E-nsv+} zj9++*iPwnyKHtMrN4YFj#+81T^4Cn_Pa}@v301Xk{bcd*Bb3?XMl9gZ^70WrP~A>E zf6D8(fqG==T>wP?V%ql*Jx9=iQhO8j%)|5W=b+nry?|V_=4pl_Bjru2e5R8EfpN5W z`+KHg0$xf=RS}%}dmx?`X;{GXwruls*?vbs;Cr56@5|q;@l%^> z)q{`1Q)|BXJ%43Vm|`$H#Rqc5h6T!Tr?5?+@~RWEaRNDGci1BKh$HRh?PIs%3rQc< z=bRnP>Q0zJGrJDVG_=$gL!GcKjej%nP!KTVcO$6sqOC+F4sSEz=GCT6Z}fUpb)VDRWdmUK{aeS>Fl+<9fRbA`M8)wH9(Ldy z!7k$pZwmTqAc;>Q9E+OQ$`rxJ?%T4-)K*l zVS%{ZEN*K<)4DH^{j?t|!_)2p3c~38V;79j()E{(P_y(lWUyc+y4#z)qd(ZWQ=B(8 zEFf6W!)WrNk?6RwuLFZ!w70c%iEY9qdJjbAeSq0+LQ!oqcuncaZdTrUng5`AjtTpr zIaD9V$bu1^WVV5SOYCteQt|zaG5m?!tLPKs?0MQ<2b?)6Ruf7^c%`ippnM>q{zHn- z;C?ey;g^&D!+~?dEE)s?DmZ~L_heGrtz4)1TcFkG9GqPN<u)B|3ilxAsPi{9SDpe3l8@L`f?)lOMMf6V4J_V)em zT@x_xTZygv>$-GReY((?q+VdUzYuO8XD*1ka`a&Vb%n7Ssy_>*$Vj7Suj8lxf@y9! 
z`d*FQx&))xQcr5nAo&wyJ3>G0D2 zXNzO!mlzhhy|B{h>=Jj}>l%{VX|#PE-0u>SGrH()pDe31JKU;NiNnec1T&BiTLeyfn#bLLpOV>8<0wlCeR2JyF)`DK#f!I0uu zoCtTX{(@dkkTU|BTX;>OMPT|3XIUl3A54&WR;iC)7S|UE*B4R#gJ(_AqCxfGTbPe| z7veQUo2OUm;5%%ZsSoWN9Y!-L@;ny3BGs3O%I)Cr73H=t(do%?5QRx;q(>O%J+R;< ze*~yY%AFmoA@Zapr+|jOn(JCdoc=Y`+@su{PQEJAFx9f>A#OIMlbd| z@A+ITdSTi6!eQKwjAV)lOXbB{?qr>0Se)!LL1c1ua;Ks3mB9B!M11xioFv;%r8lHO z+$M8ajeCKF6Uy7q>1ojW*YgGEiSP%h?SqxZS0PIewDd>)*GZE5@TR|kcoBywRDw~- z^DXi)+!={Fn|ddZ48(aQ$Gxj7e`p`{lI|!&d}lP!eZ)mOi*ZQ;3%yVZjv-)sDu>wQr`@Rv0<)rk+sdq^9!1=(1_H-z^1y&RHxq=3-N%m28>$pk3 z`hqA>m}yzc^kXtxJk5QC0Z)6E!uQak7lC?D@G8RKJG+=Lq1;P+_U~5c-v6!;=5Q|i zm*gyko#;buO4gGp*M<<)Fy7D;1K9CS-}`^te6FcXYG{Q45zp?eY(2Y7st!~x89m`3 zo1%t}@Gc2J_yf|oOpfa0rewygTgIRbSsp+a-JwMI0>|Y?y!di@jjh$A!&`jelIC1|zCyuI=GqO^b(-&geb7fWe!jM2pSYZH z9$qKfp1i*!j;t^WN&~shopZU@rwfVy{ykW@y}oA%rRp|FbiNdQz7|7NDk?fYJCAoa zSos3#sX4#i-Nv`OzrF;cth}pWX0198w|8fyb2C4>`A3p!g{I*Ii84|Kx0~jDIiOEoE=kweJaE>_*N#74!g3d~lt}l;|lk{1EL=M)RyS&~?muv?eN21m`dG z=#w?bwd62?-D^pA{}f`xz79>IApT>zo1OF4*TUU{?qr4WRFM$^2UJ$~tqGcRyE-DE z#l{NXQXCp{AK92GU__C?5NhfKVs{j@&x7;9Xj-$dbbpttUd$W8mne!%bCsw)uLSJr z7WvI4EPr&nOaSrvikYgPokFs(1Y%pq0KZ>G$SyWR^oD7VnsAc679~uLx9P_SrSY&` zMX3QtzkMTYv3wVd4*?M*6kaR+O~0d>eQ$Yk?TSj^HflVk-cq#={O1bFW@%e zLvK2*dE-mDv$)ayhfxD{yfTX0xWVRBY-JJx1QRjjy5w~3<5m!B(uF&jdpZp_0et)au z&GBmoZ)b?V9EYJ}IST{m{m*V<)79d7`c+`Y5j+91U|D9)-MnG3=NEdNcH-w=Bp{Sz zm&o2^rkeiOVpOs)H958xjQ<(GQs3O*WP#fmmDQH+)n&W>{<1+FH3> zo+jzGkAKvQ7$_9q+p%NxG!-Rttwm>cFebx-I)_cjyhm+C z4rSW|BL~HmVt1+;cLugY+?0*R)x|lr3)0irrvmyHJ8MS{?WdmW=;H~??W5w4-@on# z^e8&RGe&*qFuo7^CitoV!Nm6-9^TifnHs+p9led%RV{^8szVGI*P5fNw*l%IM@0u8 zlhL|~Xs3U*ikf&7GY>a=rb{C!AEur?+Akh{Z}D2(4XjVDg14KL z{Oj9FRO0%ZInmf_`eih+yvR?TgYZZ8wKKuAL!?M4bEm_o|BBI*2BcNFw3USQ#r~}N z5&b^+$L<0dY%V_xKpxxm?8IGpgGYhD5hO)!5*k2Sxf8Ju8|Q6@*sj0!Pea<_=F==K zRPJ=QgUZnwS3+)C)3)3XcBEXYeU!zy%aQp-)YHsW9h90wh^vrq*OD$2Z??~`)2Lp$ zprmbGhY>_3y@;z_uxw^3bM_nmkHrITaD)b$oJvVSol&6*fM$(BF*`>m1$mp3qY`I9 z8=*#Rqns-$A_o_agat$Hu!mw`X&92rv^vY>H<@bh;S2iZZzuKylV6{3gJ4CvJoLao zy9HLm&rBT6^xezH$_U$`#q#?bSzqsM$hJ{hJF=yc^713KR`Xx1K#4-KG_BU@x6!lA zRq>39GFj|#;ZA}r;a zU`-rOE=QP2fhfaQlEGOUD6#dYzNbQZJ6+#5T^FT{govT4i0nz0ZGD{Ji z>itUGr3mFF$!e9!^6I~fUv9?|SeF(vuWsM(v(Lcy!Pm3GjH~UsikR|;nN9vmbae@* zj51J`SWxCG@yn=zFgi1)?JOcwnWFpzB{4;TJk&8%jV)tg-!Wp9?X0XE*OWayEat2t zzeL`RG+jk?JdMuc&umsu7OPrmxtZFDt(M}+x*UxSv_E$fifPIqIAW)oKnYHT?6Rt( z=a{lq_|iiSPmAn`TnT(AOBSADIcK1xJRX3f+dNR#QFF#~Tvv1DTH3s&Jh+t2jW%m5 zU{Rr5bvzQf`~vOj+`nk-3EhuniO61fTw*Gz;zYsjqsED2g#7^uSw0bQG#)iUX2~vC z(dKY=y9FCqZQ-Hj@^2}@q7|R5O8mE~n#3x5r5XH4%=odrEoF6CxUshSrUmo(ggH>N z+-)$;tSij`J3S#yfv{*#rP5$gVS(sXaZth-*vlA;d%tg1dtb#qRmM^>f&fRbn&)B+ zXnKLZ4y#lo6qeObDaGI~IwRx@M88^2>-2`vWG2FXCHrn+Xb zu4i5gZM<}#hlsi>)$DEBmb0*t6GEC*k9EsDV;N06zT2>|_(~{s1p%dh0pJKAo`e@( z_QD|7DG#%sjOo+PiRKVXb*aLt`j)4p#?3OQBrGz$zTmTv&lQjfQ7)+DP|Nt14!x7B z9UpVE1kOmNlUg(YGR0mz{;`4O zx_<{lKl6EiTtBu4>wz5GTl)(W1(GXPi9OIr_LRmI;xNV@kV6oh1LC%}b@7h{lqs!u zM)hPYSwGu~d7o z72TRQd*H^%p_q3_=ud1SV`VTc^eGIU$qPjCTa&*m-XLk7cx#4ZrQCeEhQ1Q8M8_1H zpkwG6zPN)|6w{BtB++7Ol-OVXDx;0W~%zsPYLl}EB zH$225A!1SXaz*KezlXfXdg1T%g*(hiy5dGyx(2OjtXXtm6NebH46){?L>J)y{Up-? 
z3r;r1(ioWi>obD?$_)#l7bF=Sn3KF~U)*LENn+YA%r>wo05ESv1j%u^!2WhmyAIR& zLmZE&IUp>Ktk=OaLYGgR25TyBJhSGQrOz`(+-aPkE5jxWqBbVR*JaYUF^0b9V{Q7$ z-l*m%>@MV5lIFeX6X9Pn=+~czPd|q;N{;un!bH;CQNo*{-yv3X`&Eflg+4>BCfRkS zi88t%|E1B7B9&`$o@AoQtF(40_d+j)fl?bFR6Vv|BIAax!**-c7j1?|P{qTq>?PHb z`V9=Qqv=!g!&4$TxF~&`l(n?yp}x(1&|DV_%$-h7hXaYZZLG zm_w5H08aSp@-AgBON+E#rgfOys}-ksMJyd)UV8;aF@`~CQXrB@j{D)2YZa8ZG;wHm z{>hVuNl1sZ+LUTWXQO&gx-Rs4(&g2-@B`f|^+IyVfEoJVTHlsMLY6C=1|<1;uaR}a zPdilOs?lL!<>#+@cxiNydiYw`_+Jw!{U^q?30X6G#f|ZB17%H|)KHL}MyZD!X9l%^ z0o~s0r~R*xG&Cp;3Idm2NYHxte;v6AIt{k^#d98z)7==IQw@sO!)31f!PPki>@yP{ z?{h<*-SIhy-5*=4jOzzB+GAp&&G?nP?x+G4`pIlNS;*9@GOb|DR2dgCmBha{e#zqO zdNw4+cyO3NPg?b%m)0YI_E9q0F^Tm8TA`^=ZU43qmlmV-;SF==-O+m?aoqYPeRM-d zXIMjI`c#Z-1z3S?EljBS=3!vc+d*Ggq_-g^XAq4wLp%`=`h(&^nTEEIx30BgU!d<2 z+P&0cF~HPg7z|$u)PYk%V6WgED}r^4FvZH$k^Hk6K~=~-2`GfXS>YH%$)n{5B!Va1 zxS`)fq~1nU`ag^Ik&wyNGo{B}dN}UPg*^YA(3OGOxX}8`CduVe zeONTpuh46uSWz%NnEKQcOa`MY&_15b3tYlyP7Kh!JPsUvKa}W&cW2%t|$+ zE|y;eKuBR-ue{@tUiz~60V|{kj>A(UWn?y#s716utA}FNa!W`Ab8CzR6HJYA{ z9&0IRa{izfV;TzWyAf2aq&kw^o|ABWtxJ2+o=y~!6nopK*~miQ>wxmLJ5wE3Cl+yX8=_hT;FzYe0j z*C6k+`PWi1yPpZVy?<@10Bn8qvhOuVS8BfR<|>5cGQXzM+etjj^k%tT;FG(0Y|hEv z=LxdDdNFms9_m7Jrf|Itx1C$wpE}!*6SA*-4r36y+_pzHDY|*tou`9OWb+_A4NF9*ZXBh z_4iE(1KZ|l&*6Fs_mt+;#i=03P@F{^-iscVo|9;z8Nb!T?t7vl zJKz$h$I}6;3{DGCo>4Z|*MBecH#4zg6|qj6|401y?4i`Q}zBHpsfA15!s6p1M- z#-ON^X@&V&>UBA)82rGla7Scg)1LsParMao;SqitR}7zkyl(gXsjN$#Q!pMaqungb z>!sm|!eqAEzAE|3M?-~$f05B+Z$p?nbIir@3*wX|*iyxHL9)^ES4LdfW0d7W-nS@I zjQ*zK03r9aw~lu1{J83@rG-T{FdS3CiKVA*Gc~#6VkCwC~DyV)VTHEJbdXERq!ZH5bUfd##G8vF;t^6m(^Efe=-s^aO_XDF9wMkWwdKu zyvkFVno3=kje6BtC7xBsEE}MhRXQt>TAX_!LsVo?sb@+}wtWG#?pHSq(5fy-FJIA6w03m>|o+#vncv zbGc4Ux)UvF+AGyZosaGu%N#t4bv~a7{-8N-@T%L*8?z6XD3WF2P8~V!-^j;7Au9sr zDxzsvjhu58%QaS$CtY)41WO+2Cl3$~Ozz91onvH7$1edxb9vhBBpD@%&a#umW0bYc zbFeg%$7?3Vs|S}u(W3^`DcSmm3yqR(4nSKowpv^y<>rq@E~>(ojbNh@Q}M4X$ov^Y zY@7_G@FU}|_R4RS>q=2JCY?!R;{~2GGHPR}TSK8C#D3s-31!md9GfbXCW05{sM#%x zB}0y{(%=B|!bbWce{&Dr&Gm9~!$wy&ye(7Pi}NPWl%>c3BiA)L?5@WjzE zwpCo+ZxPS|iugN;x3#{d450$doABhEcR6~by|ZQMA04bIo##T{76kmlrRMCqKP zEjFGr0l2qt`sW)Ot%q0S6S*AQMTNg5SNC7hJ8uA6?dIYZ=bGQFj$vg|=<(LU8-rXj zIYz;-Og&UfqY}c+8g5X*T-I4j*ZGg?Jfjft6ALolp|2mp(gr1q zjk!2|u`Q&Z6EzXzhyFwWK}ljY=8?=NiJn>Wwe+Y_$I7@;zN#*EcrQq z*Wx^TysKNn$c~u>w`XLXl2|$HDqsvEOdMiEXA*K)VF;%~F80FRoF$}Tl6Hi+#3`oX zM?=1-5Er2f=1?8by73OLWAt^1bj+G=O*g4Yo{i7Z*j6uugagGDmgP}wDcQR3x{^%9%FoZv8j@r zgdwn;QCa6fOrM2lz^XJVj1?Vx#tN*K+&0Q)P2v~tF0SbYPnqRLVQ6(w@Z(G^JvnWF z&~9eqL2-Jz452r2U&|lZz(Bz+;AsHHINMTQ#3Oc3pemu2cfl!0in~t88<&Q)ptq1X zD3*i?rbLfm)odBR)_Ek_dEJ1T{_TfE@-f6V zpZtb^n_VUWr4hOIZx!0_n&VVC4_z+0ZwJ1%TP}0tCALDJfhIyVvfC^L1uJ#NR|cbD z@c~~e#f$a(Ya1B`RS~>U%=4^3&F04)YH_?t7}j4?af8z!nh3{?bQ$LwMgw|2sDj;t z9Jr|(Lifh%gErRR(UHz!6$SV?Ml1(J7zundaMXyQ!Axj`pe0bsYSFqdI`h!WEzd%#=9N9{C|j)XH?fd>k2Q2H@yD?KT0Cd5ES5S^ORHrlP5_f)0Zw>2nfVau zuW=DGH!#W-NP{7z{O0)~dF?-iGE3+D)StoZ^w7QPa|qvf_~kC(dB3-V)rW6^;GoU^)FR0Rh!|49QWJq!;Eg(@IIAMM|yF~_voMY{D8oJNV-mu3)5vDG4^Ve6kot75~YP)`2 zSq&3ktOdEO8K;_2M68XXcT@^WU7W@JGGO;OfQY&l>s>6wwgcod9tD%NM@A@M{$67= z30(~f#Uu+rR+eAzYkFD-o#SPlevUo%{PPA?xLtbthKqZ1MA%c;%&4xjBHSIxmcb8k z-x6u|LD!5~;PSKFXx4U3qI%FQ3qdVIFV+!vGg!WwTQQ|zcluxTjVS$yrB6;tBZhLq;)471{U zd+3bYiiX`qL7cm4ndJ5!)*UUJbuBF7N7smHRWIJ2+J%Tu%5=gkO^W~EcLb9tcKaN( zq~#yTQ8BNy5cjdSmw`Hr1>UVy8Uvhg#b4aK6XoP(0g5tk%+N#Yg7FaT1=##JNLYl1 z-a^Ku_u4b0vn5JGlH4a0K#cunNk*_)*an48?r{2s99=hP_VzTLE89Gt&lRg$&ick8 zMog(6B3^zz+98Ez@uCHzh}E@kO=b?9oh4T?sfhaEeGvWaW5G^?fB6k+rKBONF@9s` zEKzuk0&45S5CbnX&A|+rYqjt&+pZRm;bhfiDv>BythB%?qVv#goM?&F1^mgpa>~#i 
zU?_^w>oL>fhZtEkm{ry6{g7aDs)&_UkStZ|N>qlbLVv+~{L_&>v`MlD6Yi$A>Mv0j z!vjPGF}LP0b3{Z&5?MOd#|SXg+~QW{mfs;M02muvO9{V{OhPMj!_P+Gy3;S&KBIIxhTtdrr~Q{Ms^?*i@iTT>3_ z58M7|yaF#_>ky&^SA#fgZWfuP?NXB0HSD-|b+spiue}_jCX}KA(ctUq{$c`ob--dd z048993C-xpd{(nAxq4;@Sb$Prp`$R2BccA3^J510U*fyr`Cfp+tuiL|iehQ{=NU(( zI7!U@9c5y*q|es?^)q&^#5C-yd#f|3^r=i8sq0@?3JG<$NJ2c%7SN@Zkztk7ar8xY zuH8C7^oHVWhvsbOrH9ALfU^%Jzr|~I1C$J3o9(<1_1jy4+n`@VU7)#IeBfcm46sS= zny@Y{6YjTIeoWCT&7p<`^Q>>liQ`L93c&r{;ep&F5BefTtn8zT&Da>#u%>V|4j@Z|K;$*K{&q_A*+ zS$Vkn#-ORZSGQM#o#irZzPhRx|JM!1bsBZ~>$jX-gZD0bEyINWZn`(e2u;tF>9*%r zJsJutO?jMRbKrFCVQ5*-I4&#>GeB(r+FL1G_k-QTW7M6;dwYB5^o{-W^u25{Cbz8Q z=EkqJ6(iNr_I_1TjATzYeM`~6Du3?8H)s^={^pwUB!+E&TVZE=&&39OATk;$m;mZU zuDLaY@d=;xlE0OZukaROiIGhV^Vg4mSa7A*gAQiD7w*fj^|~2Yo}e7S1lYxaO=n_8 z9ca9Thp}kf5Ds-t%xrIgp12}`^wS*K+v5|edWUM_vx|H!0HYNjs&QoJL1jZ%0Wq;d7Bu0vpW`O{^-tqtiSvaYDHuR+u5f)n0~|+hma%8popDB`F8+~IsqM(b5bt@XbeLVdd`dP5xcCQ zev?WGpWZj-sO_gYm?oH&@kki_i3`ehjm8?2rHQKA6f(ifd;>*k?DS&%KqxC)Q zL;qz_1O-rD*vEH3-5Z$}kM!m{W9AkOzF$L3? zje*m>%7(TCdrUk7;uEVDk7Pf7P_6eKDVPtQP&cstcHoY!`G-+TOz@XVpgqR^k%h&R z4`|J*o`9D3=6ck!3#6t?&+aAsk_&swh0px@rs8^6vGFrNYwK!ht=ek!Q#_SFBoSy7 zpX8oEnC2Q~JNR@v;B-6Wlz-->r6X23AiulnmI(5Ad?ak&o~zj#^5$753IIte3}$zx zwRRr*<#6&+4fFZj?xjKmT~PP@`F8eHyhyO1mLO9RqmU#xx8F3a$@w^2^p^Yt#xuJA zJlg68f_4iSOjG{`)91aRlLQC5(2-{VlzRr7n^gt}mk<{Cn$Zpp$wABJ5OaLy(fJhK z5-iy<5NFpT{zO5g#S}K-UC$o9tImX08X)R@7GM*RZedgR_9z4fP1BKXv4jRqeM1xW z%yai*Sie|AFcMFkbZ(+e`uO=E1tX4f26Oj6Aou{F@kpZar6?Cxiv7u??VlhQp+t&kIC-lpETHy zREPc3o%r5t z!6H2iA62{0u^v!849^k#jB*E5p*%$*SKk0fk<7L4V*vsWT8}mkG`K)04!B1ZRSW_% z_|x&pa#^GmLz#U*XxMhb9KX=?<;=Zg&x_K)+q}GU`|8{L}GPE+vGX5}yG$}9~AiA*B z43*UPSAC!eTxNd&5E3eqhNvJEX)GdPtc;8DV0q(}xH6CCyJXN>Ic1Dc_?xzT`3VhO zxLg=&Q$2YuWvG9r=k41RuH%&J6~ngIGyivnpXW$2RPJ6YSzqByhxbY=Ro_YejHg>9 z28W%84DIASG)&iYz-)vkNDz!2Ad;%rdKF|Nil*25BNMYr+7Uhu5qrnfAJWdA$2aP? zx9VKLEut1zgwQo48jJuqXpa6M9aKMtX+19{sI*+JJTS@txU~QIAZG6*=gt+MHg(cp zOejSqG5i98W@e!9soqntO=M*pFqL0@FLMD*P;b~D3mAPa2C&Q&QbC9h05XnX{ap)V zFwENA2WiIi8DK781;1jTG{R!wzE;i|u(Z{w#VfkHsFJ^gOK2}0^f9v{U@;TxR{`NbH zyW;<~m>FIzxD9ItVW7k!D_J_~`w3RC-GDgh9p=$zmO|45Da>~Vc%KQCX_F3dWI1^! zD8(0xsJnqrE)$gbfO}z!_wFQgXfn8hF__Ql^6F#KSt5^;>32FVvexh5_3l46{X{nj zvqVW>3QV1{axN<^wYO@pT~GM-G*k$fw6)hAu{1YX7I(D^xk8^V(oI5AR@d zfrktEg@W+RfiKeyuswZ!hCmaKqO&VyO4zF6xzbA2LBD;_>d>-MweB$U5;wOWDPxd! zPG3D%4*{RBwDM^Cl8|q%-+W&xUG3^}_qa^c(A1GkDXoY$fg+&Pxj-D1%xI3$@v!i} z%N;m$F90-rM-^n)Y&&MMae3_Q=v>==c>TO;t^eZP%RdMP@D=q2Jz6fkzUmnNpu|J1 zEaVd2Q-BI530b^>m5&HT()^gbVo%m+2+)({MuMh2&Q4C9~E*r{Zg! z+Uheh_*sU_7GP2x-BNa?j@$_+5cC)U&)Lm`(fu6E*h*@<`yjQK+cOf9#70feVZEi} zNT{JYZ{Gd~Q_SOPuCBkD@>4R9-dtOTGPDpdRPERND&Z^F`@EAh=z5kludg3Gs9fC( zNqQ<4URO$TMS#1fbs-b;{Aeq%-;VaN;T_`8-QN6H%kTd3+P>=k{wq-MQy1^;=8-K! 
ziD%u#t$_sp%i`O>wTIE_=@L1laKu(=T7lYfxw`_OM_4=IxyW-YSdz(a^3t|ZRHKY} ziS*g@aFUkw-hxkAA)`uxTUW5Dj@n67wnR<_$@5gKQcrIKd~W=XZBW2XA=L z`sVf)?xOqKNM~q<8UlCaAI8T=MJGL~uNh1L4#1|`wBl?x@_tfy@)!Gwba`=|w}&_b zPo0y4tB2A7_w7KHE7GGq-@0Upjh5}Ud)C?UA4~0=dP&TU7V8(IVEr%`pM_a247$F_ zxBboBAWJ=|gdLxYp_#kYx1rmf3&4be_Uxigr0p(KdC$ql9;)RN;l%~}H}!_%qg(}m zGbB6xJSOp}tRCBySE{x=l(XJ-<1MrM!FoOTeBW)hF?ch^L{c2b37?%?t-jGP4@Ga6 zjEGX1Lx^rSZx4< z4JSt}tJT%zXc%I+wDYK5SGRY%Yi<51C|of9#WodZ$v<| zal6r{8L|xV8|J zz?ktk7;qLLR3$9VeTU3_v&-hc+tqkL7`%zR>*wpH>HU2rrECfp3%Lh0m@_qVSV~U7 zG|_v%3TM27brtExgnSSb%Jk5Qp5NxuZNbX3jE*Lk?V*Um6l|%2m7vEAMcYGl`BZ&o zN_U<*9&v7S+pehe>CfEjL3t0LxK~asp<-TS77d2MW?>qRrA&e`?vzZ(rdK@b^(_-B zKp#(Zh@_TCG!_UUPB3KA5K~MSqeU__Nr$Z?)kr2nlCrmxp&gz8j}z?sxk_<(VrBP#qF~fDrypWggaDzPkk8cWV3~mVS z$2A2a50=oKgbq-iw%rgx^?BmBCGR-pIxhk*BdpljEPsqCC7<(DQXOBrh=s3 z(wM%ec3s9F$4~L+5E1&rt85{g$zgXw(2E5PEvt z-}}qYZU=DZ92jiDM>YFOUA*-Mf4UDM$6K4lM&2{SXYOm+c#Ra09>w3s(RJht zmxw&>&tP@zq?0>J3^&3GVJ0~-d`FFW%qp<#y9IFG2v6~~_j_<=>@J48>*Zwco-H%< zs#zBu?jUT<3;CJd;9Q7?`2d2|#mq*9FmqGDN)tEcXgtsqXn47*Q%qIpre*Ho0=4dB z1}&jb?1iTlPTRsK2Pi}lt`pDo7#=mTgf-SX`|V2`ztQ)iDD=Ps`u>K7`ryOq#!qhE zuK`jR^gbT=I$uFw;dj&_);t?NT$;4yuFo~G7%pg%&fapSoe#rC{dlJKh{Nz+o?dIj zzpb=8Ki{{f`94oX??+A_axr4$Ju7as)@=7<_YqCgYdI@JS(3_f4iyIHf%tJL(mH1tvd zOW%MHxK2GK-%tJG2jK5t|2;L7o0~(Z_z|-a68^uj`#tl=*d3|y%T8-Cty@j4*{0h2 z!cxi|xi&Ntk|Vr^F7BTM<+36w84;;$Vk|QLU6YeF)e?=GB0eH=z-RyuVQk=y#UE&= z+^kKw!u2eAEB1BTEd6$2{j@A=S8gR2t@asx3@+()x^-`_@5MStl-!1F&i3PvW5DvU3+!{;!*AmC;?bokx&ye1jOf955e5UQjrnbSy}YJRLl=~OWH2@7V7!MW-6(bD{bj*R2uvj*l6xI5o9NZ=Iwv-9e(X!}^XuHyr74Y7H4LesBk=)mnLF?hLXD z!nC4YDzPSu!(a=QhC2DNg*%f5V3wkCGVY!zG=LSl_(md1Vv919#w^S4q#v^0>6b3R z$e#K8cf7*JTKpPJQtj5tFI$L$PVfoQ=wnZBA06=Nd%%1u*ELWn_GuZvf$Bq6mUeSQ*R`Pv8v4?HwtW)(DE6)lhkHvPx6w_?;kyn?&?P^ne^J zr_A81TDmpE2%lMas!M3uU$U8IHWkMaqs|!VR77CXV@r`cN#2p3b0{D2aC98+_c+lv zFedu{8nHtnBlV*iAufH!?`KgSWUD3eF_60iQ;b1)fK=C%9gCosgB?T%);1LZ(zuT_ zi{^b0K(pB0isrm`H0I9BYvkqAhX7B0@zYma(O-N0;dxCk{%=6V$8)|>CDljBF4CY^ zV&Gaw(&oSnAl^Ir64o zy7s+`)s62%g01VNml)%X3xjpZf-=w5Gobn~!nggiGWga4DO`^-@{h;V$W*U;d{*RJ zPiJGwU|gl|j2XZK#+V(Vt^oH|0e5{!PM9-L$wcCGGWqNlI z#3dk~e)6;q)Jp_X#RDaC?P_zvy1eG&{nxT(RrA>{XJK+~zNWlquhmNNb4N(@cZ|Ge z>-dkn#8dNXZVJ9}g7r&bKtgspuC`n(xgHO4ZurU?RrhyRxO!w^rhqtq@N&0q(Wbt9 z+8Ym+wkrH7P3C=}RQCSPjcAS$g&{U}S49oVQ(ao4ny2nYX%9eqHLZtm+B3qQ=pw^l zl1fz0J_Uu@dnko&O06;xsViWuOXDrB%gYo^)Tzh52xa7I6Y)%mtpp4w9AOUA)RqmRW zA`>)?bfmJDDwo(*UOfa#PjwaGBxG~n{OVSvBBOqqf_xQNv5C;ZSKdWa2pPL85HM9d z5K;yun+*~2=A@9lReA35uL{zuy#<9(famv{I;8fAUHgn6J;13qh>cBHW4Ic{x*Dhz z#Dj$}Fdvu|9quJz);Iae0FTJn9ixM-Y@@O+&HN=gEo5$2bfW-N2>@L%T3t+{AX10+F{?2V(Xi4FAR=nhlAVQ507OlJ27;Ra9ZE01ktSz`?ImNE! 
zymmz}QcdHbMk3ibgZLOnzJ1O*&HJIHLL?C8e(aeD-b|qo zAFqHhP@zh{x_W~R@L>PlPHI6kA_x;SjXy5NQ80D->F==)en%!lEkxE2rKZbWjEtr zd~5`=lQrCnLr0+LPv%9PI~&#I(?`PV$`Y!F=ST-2LR)YgV|P(do^W^bitVY# z{dO(hk?0^Jf(p7TL+zkr*6aBz)ys|aJdh%K)qEoI9*QgI>76;W0tgeqyUsFx5(!h8KM^;R5}QHq1bKM*IMDP zCx+PVz1Z(^;V<%k+kS}*HcL)i8-K85+ZHs$dliLx6>)&2e0@VH8GuDDJVDfm98PTR zo1Bvs_W6wJZ1ab`QCOD{-3F_fCf_ot)Fr9ZA_Q_hG;4>bN|1NOR= z+yS1&<4JUCqOV7cTEW$lA+I#@OrV8Y{n8Y*dcq>dy{R!r zT{?(|m>gHZk7)Yn&(%tY;Q6@6-fDMIkwj|E=6EiPeaTABu{&f;LG!(E-Y7gu!naPvuce0*r5jY25}Z0t%yaPb9o-9t*_0;P)>*Ye z(yK5IozHy#5=`v|GT_^xyu^((QTBUmwqe_83WQYK(Y#@{C<-!MEeP*UW32%R;{Zh6 z1Gp$F7p#P{Hk4OX;eD+avMNZg#@v8v$cW02P!+aNOzj-~l(+1LHk-)tIz?cy~9RMwqIE3hexZ$;pU>Uu|IIg3332I#E!VAiqiaPFG z6R;q95dLI^^B0C1xg<2F0^(37p`v7Tt3&lfMUm5~^|-JSjHFFwihsltEXdLy_UlKB zFRNghsXZErGecDu9B0E_fVC*BRvpJ&qKp}6J9_IwHw^|PgQ7z3>;jEP-T@;FUB6DH zV!I2)8KC)NbQSrdbm!8Qp}scIzm?1U3nX+T2{aoe(9+j#%GjFr=eGkBmUg74tY69zj!uTe*QYt-ULK?S8qm0tZIKHmPIKfc-NeD!c#_ zhJ|IK!$ygpO-+v;{^(UFG6&f1j?oo5Yq#yUI>?F4uW?#Mt=CgfZuGYqAFBiJUKtdO z*(75eD*Pdm0Oyv#h%J=*GuN@x^H)ko@oMO}V_De$W8%A~b++Y-_V774Ei3$; z@P!cUS!Cr?kGyq+$_)VwTpFzFcj}b{+@6unTl7*a&GJ)8nfz6ix^xLI2yZ#jr{W)xe$AUs=X_viguV#1jmb^MW>#Gx(;=E+M`M3xnD5gz)BXjqYyP2y~zwR^Ou? zr7BsZ=)WM<;03_R#0btOOoD6%$I(Bb>IPo4WR;w39+_X{^nU$Ddb%){a+Aq_7VV6Y zp#NGN@gZ?m_Z&9ATxIl3uk?mnlNDM|gYum6%j|y3STWxHExUW%e_=@~y$vl##HnYy zfO(a2T;~Nc?x-Ft`U*}fXZT8-+!apSNSo6p2**>Vd^bd9+_O$DDulA|Ixma)cYU;-gr#mM}#)rttVD*b_F5VMuE~k>cPJen{ix z6EOhTI|Up&g_9lqTZEYs{-D8Y=Hfu8au%;vb@WIep|E`Y3cTQg^ybY`ih{s>?Tnb0 zc;@xu3)Sa%$bjb;$NjQ(you!UqT#rcw_8~|&yC@HdFFoLzsMEm_{1D0>C_bgEaV-x zl2nhpdgY55cwy+TMCa7Tx(KPoo z{VlI?diwXFW%);}Vfqs>00GlVnP1M;a)^MpIW0Gy;n(bgcmJ1B{*YUa{!@xp0-*tM z2E|W!2lqFAqAaPrxV%<-;o`MOK!QQ?U7ekn8)8l9^qHZHyZ%kIl~T zHUHL~g!2HY2IZxX!UO!Trts$ROAYylpw zUWV(gzmC>xybkkv-Y&;&yT949z8{Bw4tCoR!@V^5J`z#a<8|=S!;6>QbJ2kC<+8iQ zS;Kyz_dVwb$Zz_7!ocCz>7aZ0F`>-&EYCTP-^OgOy}p0?y{qzofK}2JQRVQXpYe`| zF;P@!s&kE=y9FBt9ZhQoRTOynoKb7z?KSbap#5xlgx<&86FErp{yPpVfPC?KR&ndd znQzexVy76+Y^8R{CW^$(+4hS&yr;u7r$Lj5&n)OwX*S}w za4Rk)Ar;~!<#$n*_c!u;-P{gGtp1k4QT$&0I^!m=Sx48>!5!Z6%Gj3q$W~1AL99gV zOF&Bfq%kd5q(OzXWM=~0U*czM#MX?EEhzA5CHld3&F7rqk@M9e3wpR>c= zTnv(XmyWwgoklC!Dxl(_VUXf`p%M@6_7H0GHdhH&=q>IQnHkZ~; z6^D6`sV5BDu0)0mFGQW+BRtmD4rINmlT6i0f-a+|0Nku4uF2l6d4NMR| zzS<+&hGH|VA+IU>ENmf%j${U`P8*{p#jUIiD$ZD+wZ-8dt$T8?M1WE@*RyQ&G4sB2 zu`vj@UBd5?&R}aT7a6j@TiC*cx~?0|l6=qBS5#R(;l zh9D2I0gfZ9rPKD-c9)KlT7oDt12b8xlRRY>d@U*ymeCmy>}^(4GhK&;u_jx*%WU<* z)~Q&+6cc#xFb9P5sg-1KjCW#vjGLO9|EAA6EsI8NPEm07@z-~(3sRSvgXIEHrMt*2 zh5ew^fh?-4jJ2mmFIhUtP{O%0l%^&$9VOf=09zRaIS=^fkw?GdI=BpO&BPaS9Os&u zRL3lzOo{h>BgOe0HG+&;$4u|j&#{X~u%6qN0`0oC z=VA9hktccvm3@i@m;AxhM^Uu%YsWI_ki`lzow&t5c?SL_?qETeV5X^|>ME|c&k$`e z0QN~jl_W7mLw)k|LqdJp^FzYE`1*H!?$YD`vJZ#i7}|Y%;GPX`nBYPiCidZDm7qe# zR%v?4p_u(f)qmp(JfKI zBy;}llXz#MK!G75EmeZy1`iEF?vUFI04(M4!a>IHIKQ2|^Yh-)0QSM32xZIum>>xI z-=y|Y!Nw?Yu!6)mu7LJ1^j(oBp%rqekWG9<5YoVp6X~uklS9M;m}ITv{AL=*{`UC0 zp?RUl5#pJs6Zr}=AN6zibEsG;4W2sDgdA;JWLAkEc*l^GKHy%SZXvDE>;Y6Xz=Rtfei zBD>4i6O3i9QOL!3aXr5Zc()D00Dhfeo*b0v=$_)nyLu)`-Bs!1#)7NF0yQDZU&j6= zNo4Ph0~wdsqoiMmKJX7+qGq#)A}C)dfebc^yMa77_a1(`+2v3A{=b}kqrjuw*X#7+5lA4U!JAp!)hfhfl0Z`)=P$(NP z4$osAf;?w5zBk2@>K^~ z4C(y^ttDY*g`)NMfP^xX=n`EB7bWZwW8~?b@E(k+y6gw2 z%uP?eHgQ-ix$%Mgs}~Fn${sTZqYw_(hRO~P^om)_N_1|uBm;F$y3q=X?3jLhxRB?+ zxL_23e5eKB651Z^76VibbVu?M9c9O&d<;#pSe6LW2359rDk-aZ2e3eeJCeg&MfDcrt?*riqow**WgSP7iFpFKRQat1gg8;y%!My9Gb`A;a5 zP*p@>JNAn;n2+TdDYJ#qU&}5bBlLB+abZS2UOtsX#D>)-@zD_1TKOMJ4|=AE2-ZJq zTaPg)?)(Dw8zeNu0AL}Y9^3YSBYOcHOE`oSDa~{*a8_o`_Yb5T#k@EbJ(g@i%qzIe zTR}y-Utd&ZBT2;rXQ&D!OwkKgqzLn6JK{7q73>ejWW-5D#f4@0gH3zNhmNNBxzNXk 
zz$KWYVUmy1;#Nk*Md}*#bT{>s_kyh3lhV6YmMLdRsjNp`9)NnJk2)uZ#KQsdl%yf^ zXiMsuu82$6@+>XMFP>BKHSU?Hy(Yu}i;rEx3g`3<`z*#d2A`WF(Q$Ml_FMn?%I$8w z<d;C4b)fQW`@#;_I^%P0U_JzC2{F5Oa zx987QENLH(4#4mJLlHmRypIkP43|go5OVakT&AV2={t|lZ3~K4bHE8c??+p1Fz9)Z zKbF40-q*yxef|z*AZ+M{nqky9k>>a~-t;ZR0K5D~d*A3U19DGa-Mn(NS=bkTc zCdV<24W`{MchB3m=F4BZJ%Fou{_jcl_F!(G8NjVQfWE%tHD+?}C*u44^x^HDF`dot zZgaTB&zsQdc$wX|y_r|G2GGUqZg)E=7|~mCIqmS&%I0vq_+fhUk6If&Jdf*T^ULVp zUqdee2xwdH7|&-1I-Yvlfb1++Ru``UeunHZFE?Rxi@YRg+3veOl$i9W7z}!X98KQe zUN%DjhN;f`-R|+{Cc2pz!1dWwl-}oBX6$VHxcdR~@9)tm(0jhW=@~AU;e3F3J-xS= zmF@EcIoouNrfKw=&ZnfE-{6Ps;JaG--a~dvy1i=buGd(P14o*zwfN0U^sSAtrjdB` z+LSp4d(R`)yP*rS6!UUdT~T}yY*_3PQvY%oT27Iov#|q%TAAFDZO){n$3v&YWSXA< z_Mw)XvTmO@`8XDG@bNYhGyfjBl-|NOad&g!U0wjkihi z%+F`(WVLmBjh{g2JZ+-OJ(Ojwk6E?gG=n-(VA|$;%Xs>)aImW}4ts0)eS4GAeNPe3X4y)`MWAI^v zQ~Xr2)87AabxzS?M%xyS)!1rmyRp&OjcwcbW2>=kqp{W4wynlaZpOLeKHT%RU-!!% zYt239H~DwjjM$7gTkec-aNt*k0!7A_p{3Va0cT)5D*2stvDRfeCO)`Mu}OGo#wAh6 zV9JZTT@E7lZvFNx`{=o|`?2wg&*9V=DldCW_Nas){pOrP82HU8Of`3vDjlNCvQcl; z$Tt@rlI=1BuPolKbHi7$6ESXuBiq0@;7q_>TE9`Q*_fSvl7#EJVh*EPVZc#YRJ!8m zGeoCKM`>6y&_rAHtF@QAeWQ4eNwe`MO)#V8#bvz_ZuW`^cg)3|UEl_Hl=%V^+g5ne zh3TBNhwyK=08=i!tf zRTH*eu+sAJ+}(Y|)pN`R+cr7fb?}_#hfIMOH$9k2u8iHFb?WpM+o(&pWKW_dOLXyA zMo$2vNEwJB6NV?xppZ@q=sTaof~Cr6h;lf`d0fa^1JM7|SWm{>ccn>Fi3xq+LI2v8 z(f&rH{N4Jp`lxqis=a7P0`FiyAbB#%vY@}fjj4k)bQ)ek45k~(EHHP{?7IwTq$3%s zK&Aa?cE0d6T~eJOG9SHi<*bH`s?Idbprk)l8GDD2-fxQ_a&U+Q<;brBWUm<5+!)}5+}rr3%_ z(s<#UwNSwfHd`)eXj)J%2KAwZN{D(LD*>1WuB@OwGl+>9u{gtTZxWKDx~SU*^~qb7 znCm)G>t95XQHrSFq z$3Y%HH4yBK&Tnnl1Nx|~$y)shR_zp3BY3UsE{?LD8yZ$#6phKz1W&GN*MDV@OzsvV ze=0Pk&N`4eT|hItomi!|qlQaJpkHbdDyG7>H-`w%SKQB4a$Knl$JPCl7|oq?15@||I1?d zU#nT-(Xlg8*dNJ5I-?nIZdzLqY{{*$`r9bMXt1O$It*^qvrIb!U!~T5#=JkvD|Hb< z2i!@>!^GvN9Q_$*$=a#lM5uj)<32W2Xh9+e#>6OM zxpj%XCj$~;_c*cK5nm4EfQZ!X0b&@-9us$AZogM_$V5pH49=EoL76m)?P1DA*c}{? z>BOP$a?s8eFu!OBf)H_{(U4?+lB(xNuA)y0f6FSDOh35Q;KWtLm^wP}SSE8IItS8& zs(HSdLaT+Qj)s`HaAr#uGf{_dAA8`0C}DHtOGEEak+NNJnJp7uXg45qYZJD3^%4cT zGa**u$-o$qsLNxUsl(ENH9=Pj3@~}~-r<-M4K#GU3cnF8f9Ry<>7?%Hyd#CUV+x?Z zoC9Ng#1ycucG#a_%RYHi?wf=!)Icrc<>_rt^~Xv3+lD&>;BOIdrx00(+%9mZ*pmIb zmI3zz%60Ze{em!0wEbT}!xqoSyd72R+gkH}ik-|`JnyTd()#PHn(gL`%Xtmq$5Wse zVAA0@My2~WhxB zdeh4uM-TFCHxw0Zn~)vZYgyaaf@X))SVr>j+8uDng^K~)outxX9${*7!stI_>f zdzIL9iOhN1JGhpZg57!pMxKL%33*TCeczGr9zXxp1#8`EIL|UPG-km+a-&_6t`X8} z3#|3eDAb6SdXVvANeJh3G9rfhZY3UZSx8sxVgxtBcrFAz)xf?iiw*J?tTJcZ$ zqzdV>b%c>|YP_*LP8pnc>R$jtGjjVuy`O@Rl079=t8W9WoeR`4(By-pTS`miK2i^3 zRz>8mL;n_`A;jlfVfd>mS)HUE~>KUZC3KZ)5oQj1mP8f8J4( z&~5?iZBc8ee5R&QC;ON(%fY(ny}1XaTvj9bp+@tmeBm%i7gvvzKKK-*oNkr8*~ zb@l?=*@kq_C!Yu#5Jrpx9VH{U(dbiRU7dUFykVPuBDH2;o_ti40^e*~5=bk&>N0xL zZfj;_ucaxjEL}Z>c=q3{GwpAj@1%rZA_WHs2=bSj;b>xL;HYQfVr$|2pH`G8buD}R zg|y4XlNtO*=lx_)>t`riufH&H&_FXU_)m`dwfF;2_@-I}4p@Wvbc<-HSksVf96eF| z%pODV5wKQ4*A==D1Y%f!Y$wSUG6TXUOTzbhyVo9mn~OQ6#zrKqM%9%offDlQTG>U_ zg3FI7;A8AjZxZPEoZc~zZQr}R9_f6%vROQ2uX<&do%`tIfj50)BK%W+0GP^rT)_85 z=+V2tBMh32CFLq*GY480U_d<^a-ElEF~Kz&_DBn{>OE#S>G;-B>3J96GzAn*1^=|K z(5Pk`UvX?qrL>4vFIqR}Fl=a)sxzd<%%|POqDFR)DaW316qf_noC3L*7qDnQ<2Q5DI`rK~j)Uf@LSixt0E z!xoS#p!_YP1Em#>Rt2mZ_uJ9!j&;hqlygfdltpNXI%se(6^TFpXZKG41? 
zgilJXLKA-5ULlHI^T2evz?*8myx|~TGF~caHA;{9r+P;o8Qw(h3~+AE*6-Ljzfze6 zvfDXOE>pUYfzYr%oCDvNrb)__QpSR~uO6FEO`rKM@8CnMJV+`Yo9%)6#+u8TO;!EZ zOiYs<0k5vYx%-K~<;#Qo%K==5w|;)cHbm3FblIkwKV`mD(_~qHXfXwn8#UFPjQ}(jRAN&rPwlq$4)zciMX*%%eZK4a<$ zVP$cb3J~_J)qX$bt$a%y_-G|kU>oX>MN=%%0>f3eDs z50~=xu*K<4;GOwapop%epp2p!-shI482v#DR)r0CNv~nrH}jDiQQh~^im8#U5!HW< zVJj!PC~hvSfxdk(7-7jw-oTpg9zHF$d*M!&0*LQRBTF(ai8HAmsbtkk)`YvsEH>0i zZ8Auiiu5YJa^=#=7=wjQ{HZyx!U6A+IGt^sa%{NuomoU)C|Lx`q4gD4L90Y9+ z8ZIA1LsAu{H|baP9&^?4lGSjyU^f|l0i0hs|A5gcRvjGZ6IWP^cn+R735Qh$Rf3Eb zG9dNO`2Av_&1gID@ThVjj}L9sOo;bgn9Y|W=V)^J>V1{9LgO@Ix7l+TKS?hB=*+ix zf#SWItBU!ckGZ^)(<0!pG$-`rnuy`iT(1w&q+|O7Xh?=x#SA_8GupO3b-{oyt z;v~2Di~236M7J~U1`k$6DyY=gl3!}$+SFyyA4aiG5Saby8)KQ8#QoK_Wz;mg<1JUC z0R@UYW$u8XsI)D!Q!r#CsRSf?03($n!6rq}~$K+@s6qW?69q10YlYkeU$Q2VJ z9_vb1;!cE(5^2;Xtl^eo2Yg)dnBmm~G%@)NaW%;JJz##;M?0$}Bg_z!jNVOn65*(UtKx-EE^_s`*~ zf4aL&K*j4wAA$2>qOSV2 zf>$qt{27Gmpf&>MTq)MP31;k`Q*HR*MG1R9I z)Dv24I}k_=r^WZ&0^9!J!A*GX!pHh|sUe)$;Vg*jr~S$}>mzAuND3&F zcYj<<+CkU9@#wHIdhTuy=BD@I+-UA@v7|aPN(Obr;UDVR92U;CBM?NxJ{FI}AdjfC zKa}znDb`PAYkjd8MQV3_$<~?(mIi&5ENp|ip+WWFcsM*@bAg{?__qT#`Z9A+sXiIB zvx!_5j;=hJgR8DAycS+*#wE0_$LnHz!b40=_=>p|=GGFlof4u#@*N zlZ=#n1+`)x6@lM{ythJH+_bC+)oq4Bytu@%mwpm%r*DX9Ubq%v8YF_XSA_&3!_f+h zv3-r9ak@OA0}Q4F%99tTGo^o;5yRWnf4+xpiMjAWZHE4)XxFKWy*tPAbFDk72TAwR zR~5Lp_xuj+;CKBaIzqAut-FZeEJ9MMM)KTC^c-QZWfFLdiMZaEd&|5oOvaJd{F#3p z^-MpOwR%;notiQI{8YM(B=owL-nblVo3)2dFN`GFhX%2_?V>&d0)I-c@`l)rv$#d6U6w(y(4g>D%nSLgJ6cjf%ZQY^451Ov^L8YIn{CT2cZE0 zCM8=?7+U50-!}flFKL4DzQ7eL50E)&M$`(qC^I9>sd^XSqk-4kk+&!YJTQqtN#S^4 z!%#Nihc(MkbG{>jxc3i(%^hkGZfT%6QGZk;m8GQZ{X!qRu-)%q#K@GTHR2esRvY#)dk{!!_pxMvF}t%X-j*WAl#6-(GG_{ zsjJPg+u>)h?G;S&CLQdTMe+^yUi$5dz(EGl5J>b1`|W;=dk7sNth_OI^wa}|i_oMd zfkn3;B54Si$#T+>CNq0|V8Q?`?vn@FVJFC~W@r30mVHpnJTb)nZ6&89gH^(<7gQmXo(HQfM( zE=7t$6AIBzfgcm+lKsE3Sj%o%y_DR0>pDQluvT9O1HQ+E1@CVfLzMv2!cY}BZoGlY zR7MzoSrK2h`%rFAD%E?KyA_5#5f0|$hx3`QgC?=|2RI{S?am=`GrM9kRy&IYXw#&KgbyP<`<_<-`1AL$0ZbcxCQXCR?5D`RLl~<(8*`L zad^R6|7oYS!H6mNRtEv}Y>h-H%di(ymK9FUP(SJ9ek!AnJ&F&yU1U9R)t|+z4YRN1 z?>$5ak^%@x3+N{$xo2if`xe{|PTuWRZO(GC z8QA=_7q;?k4LV9p4D`B^$G%}z03-^Q!6mDurlLO|68u!NkvotL=A{e8K|3k$o_5SJ zxIr7|;LH4-QE>T;h?j@Xx5x9l^d{>jQH?InSC+sJqOK|N!Q1Iu-VDw8 zK<@DZ4x0~rfb|gXDTD3TT#G#G!mX9X7uKFIT{Ji=d|rZ zNn?Wxc*3r35fGl;uCYn>FKo|87BN|lrIFO?dRu1QDjwq&Z^#imf>pn+Uy@IJb@_l( z)c+zRU?YF`RX75`cX^ISfk?VwKeg*;2JPo|#cP^v55VO_eS&U74zUeCaDJ-C9X1Ns zItjZvd1LqPf?mOr7{Ie;Y)B~Bb=luK8x|N6dtf%s$t!Lvdw5IL9OXTQopG5OCa16| z*7o}D3OW$U4Up36lA?WrmT!iZ=LEi0PHqhxK|j3>hPmhLymX}0(KVWV5eCRM4O>u^ z6uZe2fF07bT|@z1GG9sB8>uJStWVS)L}e_V{98Rm+CHDvH@@%Iq0QlXXp|Qpsby7m zfw{MO6&u|%&ufnNXvGhoe=)9<2mv)+h0RtvhLgAN_EF(mr*Asw^^*V(*=7Ud*&FKa z4~l;`1Ug(NJt`uXh%fT#2U;c17S6|nNmYCGz)Ls9)a0(N{6#tid|(-_y8!JbSsXtI zO%5?HVNBQ-F~P_<$F<_@n@H^03|p|55JjG#XI$|$`i6qN*gJOkKa^sQfjg0`PX=0k zB8~3QN&y4(H+RuD(0cQ}H+Zx|Gc>uWdA5)*t@7p}BmXmlt3KA=evr@t>Z1T+Ct++C zP|gDKae1M3}Z4a$fp}{n)KdUQG9nP7gyrJQ4Ln6AH@$WRSf=~Pp8R_e?%!o z_nBb1$J;Y>!-KKoCAZ)}P+uE;;C8>K?(3%aw7PeA)Tw6t+(}^|uoRi#{JW2z8ct?0 z0;*xa+^+y7xbx2`e?B;W@>UIu7w12GkR+d6$7wSB-9Poqk$Y@3M_1IqO~0y~5_=|% zh2TLhr|3DUH1h{DZUuxPivkjLA1H|;80xFgZmlHMplFOBi3C~y7AltvrQ<--ut74T zY7lC^Bf1LWFdaar#5pSFkMpPA`Vc!h4K3PKuBkM>+w!*xY02+kXJ*Aej<)zS4KE_u z%pJe?OuEou@S*bp?fFmu>aW8d5g9765^UoAsk7T}!d?f)ZWiAK!Q`8FY5B!mVt4wDrVRN)~5{PheWk-u}9;+nEU-rgNXLSm;#iQy=`OinISkV*>mO(ET*SiB zgRh+4YnuHj8$196i4zfv1jiB6^C5m6@_KMpV~Cg~zlU-8jNSpakQiw@%mTi2Otioc z3Cz4V8czw;_RE$)JZ@F+tYC2E!J8wDym(^(l!X_z{m&xV{2oJ9;LBO&Jzk!#beMwK z9Yf$`Ic8tyz>Q0_j^^0SeT2P#sWwDXPTr(6MIPu`2Rz`SyEro85(1NAL+7p0aj)qB z4JNyji$On(|S^w8G~P- 
zHUfSza04vuxN`kD3z2*RUvoiQbxHUm9n@iqyA!I&fk(_pTJ};rn1_LuXHXsuABE@K z`9sI2qxS}&>T{`op^p;)ziE~gp4%Flr1Csp|6nvKkwmdY3FjeF1Vwct4d$u+eicVb z!9B$#QCe2+L??RuPX*^`i8R;?**%tYP7n^|QWB^pb&bWZd(Cx4oF_;P3jMNu1PK-~ zW=(*%6^BOH=M-MUgmnuO5@rMma8(IG9^){i&#{7*yFu@a{Mw-&6L>!B z2ZE4_EnXBN+=M z=;r_W4(^qgu}{tV$B&Sni27www1*Ia!5zzVT-hYVvAfhir_;mW=8;s?_{Az)IMpWM`Gg|(2KBppP&zLyU4W+ z*z&$-*SK()UvrAs_kieB@ZJq%v<4-<@I23DtamaB%=hZz|>iDW-Fn6(FUf-Q4KA2Sm&;aCwRM9Bnk2-WW{Fk5fdu4AD%I;uAtg!EhwfL4* zKg!mNiL%8HWDJXypKPQ6iUT0!W=i;tVA-1H6N!o!`kJ@)8CwtJf%pYWA@$A`8QSzLq({+3}Q0;Iespp*;u++i|egSI{G3Og}WggOFt|wVavZ%y8sWw0xtyGUBaJ^ z-mY|yd#kPX8$llNK38YsL!XO0;Ioc-NBDOmKG%r{)DAUfIUG+C%d`|Kkgi7kW<}Rl zZ%J|{*GT;}9-~C0Xw}Z`4!RyRXbC?V5K%HCH zT~7DzMs;M{NCr|cJy2`McRR5XLaK}uN(UFoz=AcLM?R`?HUQhX*~VV45U!^%{Tybu zU3Tz2Lg2M>Z1nGiP*zoB@-XaNhb2;Urcc*r2`Pyll_l1k(|zit1bE*XPP5EN7N?jK zTlr-p?Oq^m)6xX~B2V4`_o-*UM2@v)v(LNq%&nD(+m!=ZHBeTqvo7Eynm3vUV2>yh{I`lM9jho=0;WG&B~sJ(3tLPGTreD^GR8|SO$Yw>jK z-W0z1UOTDtO8FI3Z<5ro%@mK`SmV; z@SMyHLf`R(5KwbqXNSAgh&TZk?IV?W*R+gTckCNLw~~Dt8v$KfHdl*JJ z1i~D(Te8hfDmM+DVl|^UM-r!jX%Dfctg6KQe7o39g5LtZwiQVDQ%M97c$xZe_^Ldt zduOubvB0m3OU{iCXRY`ScRh2!GvT#5PLPm|lCciQ~a`RG3O}wcp)zw<4IqlT^^QFcK zX@{EW6*H-lC2&5%u<@2XIK;ojOkevc{OGgN4(Q>yj=DMa!We0uCt}sFLa4cDd*Af0 zbr+J;F={QlDp%0T0}(Nt3~D7f<+nAaxHchq66JC_?vBlG(P_#6Je6*zo!`uonUQNd z^!Ms)`1kVL0Na9W#K=cpSC;MfpxvJG=cUs}ehbgqVnN&okEsg-$RzOki+yi0*6K%4 zz>{Lesg!`(L$6#+#&p?ZVBdR@h{CK3jya7nDJ;VbrIjCwC-mr}m$l&nes@j(Ak2=J z^+=AmE8=zt;g&D_x{=dtoe9embM`hi+v6^?zzmOX>KLbqaEYYDCrt8OcVNFQ!(n3c zyS@N%%G;w)B=cEL8lZ(-<7@gc!y20nIN>pU=!K1#5ijH3O^MO>`4Cz|&fRRaqQ5u9 z$-gPb$=egC6YWY4Q|4ILdJm-OU**biFT+R4LwbF4fLm4>PwN!a)PQoAIZ)4wYkvkC-Gy#vA%jl1k-f@{Rz#FL&ID?!g zA-vJ6&gJ)L{_%D5ee(G`k@Yu%Ihg14>@jaVJbUb9o2*`jwwkWL5FOkpScN0{e_#9@9vK^# zag-qkC0Oxj9kGak7f)c}ppSG1mjA&2Wu+hECuZa#PrDu+laj+pNluE-gfq*Ql{4*_ z=r@AClM9RdZa$%}8I0E!f+wRxirR+TFDqs2giL5xM?Qdf%E}pdjgym1>((fd{#R`I zvyG3_7H0DSdr_4cFY08{dP~M(U$6})svUKeUWJ%n)>hM0svNpw&g&Ps-nFD*eB3;7 zSq1BT<~eUDA9)}wg7n0s46Jl^>OOW#at0m;2Q?)-&-iZ^juDjPz1nsKVJY2TnzrT< zM#9{4R@QP17XN_%iU%YwXDYV9fPf4jBv`KC0X}L7)@VmKUAJycNR-fe-jjsgSoo+! 
z4&E^c(Fwv+urXMa(`uC6oOC>ru)2aAMu+h+guF z^vZN68f-}VP{B-7-6dD{MQce@bUq7mp@b!=tEu}SN4{o!-> z(OQ_=!TCZNFn4s)vawc2;vz5O1*)^;B;srBS_p1Ryb~2kIou)@O%D9F^Th)t0Q9ac zKbcx28iT!JVr;~;{%{4WC6w?S!FbG45PrNdiZyog)QEWF!-N?d42`f>6Y_-*kG;^u zIKuHA+B33j)C|rT+n6Ex`({xp<|%VB@?LRiY1E4e89Ye@mWi)lN=8vt#oxU@RLrIR z4qCSGhlZ%Ty(C{f1<2d2byhs-08~GE@wNr1$Bhy%{Y0u~o!{PYuQV)NE96WxIkmq> zb1kGcS^!&g*lY3>k*76ZzBFRZ5bgBqDI_Q|**@vOWZs7pN1c=4IX?31<&)wIh~Pfe z$dEj2q84@o>G$8>rM=KCRz*QDOyqPsoO}OW5CX9S}W-271casLQ7Dx#@b&D{B`=;B+4XS2crt+;%bT^+!eYKTWufRsR%$ z(R&clSjZjdu-f7KC+IhA`G$Rszl~npEez>4(2N8f?gnusAy6K?aJnMkB27yy|4s{6 zOzwBF$xy^+&B-UEf&Ic48p`_+ysgtiZiQVXEXsHS=b!L;I2h`c_!f@ZkY9#VeZ`B) zP-XUrpN)Of0A{2%G6E5Uc`pO>G6q|F=KiF5bWLUr-s6-{aAJ#C938*4NTL?(2(;B1 zUXVw*9I6qt@%1}tbrAjjOW8h4B(U^ZqxFLfId{c2p8Q4w2sSz||7{r8r-H%*f)XOL z{l;YM?+G{B*ALa)<5>W*gk(q#y8PyR|MNah8XoMnFd&Z6PC^7WRWz}G;o%7e^DnDx zi(5*akRydC3o~(QDu~~sFdy!AP~9d|>lTM(Wvpp`Ema?AxLld2%ctRnZJS@fbD8+y+B(MHjx7=)0nlYr+F{Jnd z_F8<s|rR(HXG2h=4TKEgAaNj_0x-3fpm4(u*nL6 zXU#74%?;u;f5{w@*IfHN#GA3DTS&iM{u|_SGwhfOcTFV~uyMWZZ$Sn4UaREbxFZr zA9XK{1j&cz{BK1^hIk4v0KHDv?A_hG;g-=i!a*zD<&X1S<#WpU6~+z~&bycJEf*Pn z!j{(aZAzZ_IpAqT=z6kSuDfw(i5~oh^}y;cu&^-J5?yx|NST1AE8tp3{TSHXeXso- zxzzREpf*Kl!`orr`-)ZrFVOT^k=MKx@9Pk~_|Zgp#pms4{#;CNYDVhTIq!A%5__%o zZxEs6`rD9h*DBxDZUulP&;NQoxAai$?Aq0|`#eiasC|)-I=}v7kJG2~EHE~fPosGk z_=hr-4tyTJ6+{+ln~z6Ep^dnBJ&zTAv!naG`r+o(e1CjE`?>zOe?)NdbQW_Z`uEe_ z^;3G6QGoky$LqNXOXAaEzmrwKdr$r>Y2e^njn9_~vhGcDC))Mt`pmd-l_KytC;jxg z1i0)pqc@(U-STX9Iy{c%$hQo+`tJPzn%76OMCb_{H!gUV9}dynvbnvTj5i($Bz{a? zZoy3~%oY;jw98kVdk&owp!i%=@-IA4=)WRswcAj#w$Cq)Xg8}KF`1@wr!AGh4aB<{ zQ~jA!sDj2&-a#0=JCXPTgH0Z$_%z>+MxvpKkXm9cuF?Y`PwkEIW#;8V@R#twphovg zf_SUC%!yZ6pfqj9KQym+!+i%aa!k#%!u5!BhD27!kaXo=td74F4`c{>H7P_qpqgW? zwNutPZtT6|)0b#NJO{g`PmQV8*RaBWWi3x3MpH;|AlPLWo;pTQ2hGlOouA%~-yGU# zotrEzDi|xk%d1&|p!YpMW%zQ7K|J`~e_`^3bkJ03R7V6MWyR0>Z>t0{RR(2G7|b3)7QEZ+Ls=rFtb?1Djv!#qiRtUj_}ED+afv z-n&@(Fz&r^WZA<22h=oiah7KC0R+gwfilGcwIAEbKrvFu_B`d8EteJ>gBO zxLRU`v5-JSzibQ>`nrwdxPj6<pZuK$;v7O z=9gORe2PRcIr1`0s*dur6B_0x=r{}}I7g5n1biNRudVsrM-rYliZCRY?LRkc;Ozw; z2@&(5ZAra-W_T@%i-~vp%zPrU&9@=1f^9JgThmX#>7l??NCdYMZs(*og-3idcAOQ1 zkl#w$ZtISc`8Du4yH28ctnqLPUvVj!G$kdQr<;k!S1{{DgYV|HD6a@s-up{7k-%b9 znf$l9ypx?VF=gk_uAjl@?Ojw1cmn^%<@08gGpZk~z0_A(U48!9|GVgvm;A!9zTT}8 zHvFifH6zvgvybT6_Uc)k8j@4##`Yv1+a&ALh|68f#{o|OUuXw!!qClMH_y$C%q}p! zvHtMaeJK)LbK~t>;;WHrQ#RgK9jC9ECE}$`3F($)MRz8H=h5|=9zn}5fN!bx#dM+1 zYNF_AUvdk*aiWVBZ)xzZA1^ICZ)?6`@t0JP-5Uh4Aru zQ5(B^?;rKQbPFcbd)`;NTh1= z^iXC9p$EV-QI=6_PGEM++C0ic95OOe!O^PuV* zQ{y{i^J{KCNjr{ydBw^v04g6 zn^&DU(K&?Y=LaYe%N1wAp^#gE4N*`Dvasbi>sXuGpAF4nM>+vdYfqDA6AI7Eh%=Qc z%?{I-!Sst{moF9T5kIW5&0y|Uo6$P;z?FV60A!7q;{qGzUcIGay5vVjlj@Amm`gtI zsB;WtGeJeCT-c)Lr1~23c=)H{WDRK>wZEUJgU0^gr z-N*zrIh$lWb&0h7{Eq}6bzUivPKdmYqO>LXGzJmBJjFsv^BOJ6PW{N&`jZ$3xaIE^ zfGukY`uB}$OhjK#G5M3sZ7RH8Dy~C{jNOevk16S2lz`(AS$2TZx;rG~M~JgVe}3XL zn|Qs9+@ne=?yfMK>?PQy)C`-siAmfyYG;`O7W}AD98CE+V{bp8xHi>TBYwQEtVCT? 
zuDBrLuW)@S6IHw+7Fp4VdSYIEYRKgvXsJL@tvfrITgV`MYCkF_bPW|M>B**8h(6BF zV;Y6FUCJCXN>Y5lpj8|b)UyaY8ZrTt6aF|#^3fqxDx9cL0gau?gflOA~mOl!uP z#nO2>gID7G616$7>(b=!zKzgCeC?8ML*?B$!ok+n1rGL4djMbhw@q32AkHPopoTB+ zKkDxR4?b6Z0`(K(2{p2U@Yj#Q3fM#-RNHWVUzo?I8ZZ_Bln_F@eV?#(K!VW$!;jIy z;Jr}mkyH?!3FmwYDih%9QRwl=#=j%cd5P{>6TK$-fjPgIQ)}|9{mJ2%<*h@VB;{Fl zH~QSEc?!h0xOX+LJsTfvrF?qdk2MJId@)N78yJ1pKX;Z&z&x)S_!;UCe!261yEGyx z?LA6EoyP&dqrUjmV0e&xNYK}TkXw)XSs&|%*&@Ay`z;5~2ol22SX0dlW!({vq$Pwo zm)@^CGc&8j%E6s8w%%bw5*rjb*7TQQxM2^)auplq$D1X*M7vEs{Wqsn#>w=dUbU(c zieqKVHvMeVYjtiv^{1E=yzgfY7vFb#5Bu-APxXN5n{TU1A9Boqm4SuBvrF8QyG?gX z>$`qQ%!_4m%|7jz^a4-Pk2N0X_Fh+G#_pE#4enFdZ~dK+7mJGS4woCPVwc~ZfA#Z5 zCFXsmw3SM8FG$>ZH{U0(JLvY$OKh<5hd4H})6mjn2**wzHHTjqj7{-q} zC2b)L7j8Yc9s3~s|7cW`1=gXqFC~a9LH3jaz;naszWv;7=WKmAcS@(NMO5u zHnNzlz7UZ7&R9U0M<`) zh}MX>$t<9vS5Y8^A_?Kgqf~SPcL&g6^Z!RR#KiQFD@@xkSQ+(4(a|X z?FgzpE>Ne*NYR%zl-qt~W`)Jfh`GWsFJYqGWs=wz422{sbRpqpSQ|CwQe85+D%fl? zq*hJoE{Ge83q^9y)<-Nn7uBV>b?(?u7tITU`n`I6wqz>7kArzf#2XqV-t5+37# zP7Q-J3{Br5-Tm&BAkc4s9BfhrI;(ke;;S&|Vv`;9&iw)zPIiVV zzIs4Xu~uB-K-3ac95-1u^QZZo2wWG-w&+DWjx)D_#bkM{!W9x5syI0xLEF&Dj(3nGfUqqCAoB8z6iBZ|rQP{Q10Y73t!lsVX*|GW#E`nQo!bSK})1H;u}gn|T)ubee?{&LMvLI19y_45}~=n)38Gt-mh5;MGA$cvmir}!HPodjFDAz7^+ZG4!FgKNYiB$Z?ZTuR97} zq?lKqJfLfw(mB9@4SowCBMZn@_xSp6@G~mxpD^+B@HZ@?&9p}wt^!GgiL?3&YPIHy zRNr=29N&u2;+XA301CU>@RUg5bRm+Qt#3j^i&Jo!IRh*t`l7V&F7j&3U|XOBpyuSX z4kURr_PxGckTVL|b3>TIS}iA+?zP$t5SwE^k$Ohs5X?h7wfz6VAlvnW{!!h=HMb?N z6OqxM2M#$c~-4 z$&hCYBjJTXL9ty2pIw(-sx!`YkaBCFw^aMR8F$I)qDjp5MsCOkkQ4jZTW^hqD`5%P z@^Nc^Og&#QtN^{6-Q@EQK#3 z{PWPnA!~R{0Z_r}UR@eSnqIfe>>%pP^7Bi+e4cb$T>NG{p|5sk*|Hd61uvIS%jZz@W+ke~W9>xw?3j^B-$$tf(ugqdtuMe`kTXv6 zu>rA=0D`29oJd-S52hYI2ucHSc7GLi5^kvrRn(C=9O&F=j3mmc?u0f@vbusiak{7w zf?B^a$Vd9M(y!Wf{#RVckSHn=&F!LMLznDcf72!M|1mOnlNR2vvszmt>^d?Bg0g?1^~Piq zIs<$9XLXAu&{x1MB_nXbBfv=De)dvg`*sOy;Q8y(7H{A<#a`_8@c!{hCPQTKBtBAe39h0d<6$1zloyNg`l zewuVl_u*M{%B`dmi7ooQ)8VFw@y-3d?mZUi9$)!K$A;I37FN!y^QY!>EMADN`Or)x zfY5#s-Y@d)8YsbH?DV*ARO5GACB$Cge;?gx&(U~OOF^-IJx_4+F=mdgp}xv|9&32E zLb9`-0apLnoNdTwpR3*+y^#Fg^1j@F1unM@X%BpwJ}jgA0Jo6?1U=i1YtON?L)n^a zF1H&(2|Idknf2EfpUAYM9L?7Sa>8!Ff&1}wkbR0>F(aBSZ3+wq81gPRtDjkT6WyUqPJk5)2szEb>MVBO`6mXZK%a$~i@%v))yPa|h<3K?j6OSeG*&&UJZk2W|> zl1$AoZiRLQH_dh?d_xT9Eo3P%H8b+mW#zG?aM(o_lAYbZFFCU(NZ2QjD0J27BxL+3 za+R}{99NJoa3rHak_b`yt?tapujH_MiMRDTeVx0`CZv4Kq6u9x7Tzp$wmG)>BGWOp zbB*+xnX75Ypk0lf{(rbSr|3+&Xlv(~w6~b~?rvr;}97j_r%68YeP8(Np76F8l`71kN8@c(mi?69@ND`W_>>gV};bQoVh zy`J^AQB-NJ@>EU892`@u5lVPyh$b=Mad@aL8Y$LDz;DUCQ(b{Mq;;1*UIt^iDL?OReVr z^FGY<5aPJ6IJIQtcCe5Qv49yH`K>*2@@DgBqcFzIR@UovlAdS&Zyc4IEf)-8jqT9P z0w9vLEIq~oM=Xj}J!KtoXt}7kI`-%aq}D1g%$%VskMk8|h?3B87W9={WNfg7rU^Ne{h zqNAtNxIqPO22%yjJVEz!h!tfQvw)h^Mn_v00_2>WrWgl4#aKuvJIQT3r9>$C9kQXp zKx;^|kb}d4jT5o5i9jZfQxomu`({Rek~kWVt=XC&on)RdkDe@!lxD0Rwl5$U2MA!} zL0PiD3(lJtDbFP4m4$5oHYs~|Cb0dEqiAaA7$_zS(k}anu}}poB}^7c>xpJ385uRU zkn9qWjHz6Lo#ZwktOGd3?Dfo!id*DJ0)CHp0 z#utJxi1my+48j!X+DS&f5QC9(QLyqMA40zhAde!hny z;hSX(SbTdTP6L<>S+YwLkYN}#C7e2lt@REcX-Mr)3)>=6(S;slNn*_&fR~kexU^tg z@!T#F0VIH-pB-kzcFRhS1?&c{SPYCUV;Z)$z{%oZNi)Yv)AjwA+#pn&3PGAN%UPql zx1AxJf(oyqv>}&l@2~#L7`hVTU57nDdJ`MibQRcpl595Rew5NC!U+fJCgjP*^?C*5EaYLjWmSfl z&^}Qe8mx>S+hi~)H)QW7NFV?lCx*n|X_ca?kPlDX2$mre_=ei-ZAQi-AZI}#dc%w% z3G*!QOd)IUD&jXSb}RBivqcuB@$G)lBMtQ)!XF&67UVnrMINU9%{KhzLrUn{ymh$t zF`jAq#v3i0{YJM}DguSfdUu+~EXpom&8|N4Zki4CRpZ9j7Nv%7z{uOW)bSP1!kT{kT16{c}NfAqpq!+`mz zfuiWuo^Ns$h{^9zzP)-QuywQ}DD*z?A*P#>a_-a~dU4}@TaQbBya?4%Y{TmsIsx@s zP`v@Xb|r|q$4CsCO8#V=0$v8*cMzD?>z|i&U?LrWug1GOu1^<-@$vTE7QkJcXiYE`?Q^u&-eB6VGVZ*fIa*91)|1g&v;EWd>0UGR*M5Da 
zVzR))%}LWQ!{1X~Z#g*z8DAUI0xXSc^)5H1m$KCxuD08yeJIm>ACr^B2=6;y^SBS; z`QD;|`0HkFZjft;^Rx1Z?I_!jh8?c3i-9^j?fK)p zUeclH%Gbkg13{f|@#XCw{}fOB>oe+S%bZxMV%PgxdePla#zNu?{9mtqrjjAuUvc=Z z2*+6q#e%xd*G#YE`cIj!qY3!Fo$hzhhM#-Dbu^RD)8w$B?Z;TX5#;~Q^!Y@uE3_%xqF}0|bp_iqVgw5VMt~mcW*ftt0M$ zkEM&EwYQ1w6M=fWp?iU)g(FEbM}0i7+37wftEnh%>eLD>T|D|(tj=5#sAHaW=5tAy z{LVjWOgm-2|H4mFtsN_`v|g~=;FQ&n$jILx`JJ{*wwh;s;4&QEv*Sa0~`|_mz&ydub+lb%i|FMA!(cAoK@c-Gs^#3n<1`I4=;0*@|(n848RZp9q9A&3I z$$Yf1KfV|Lbt0e3EdT58x`@RZ?N2JU&JNsEEajjy_Et8eNja1F69}<3jE21Nav_kX zhLYy8K2`~eS(0nzSQSZmSOiU2-fsQ(x{{iZv!vXc_tfE=>adlT<^=VzK}Yjn<%%jLzhe?5 z^79Q$NAS1MXrp4(OazMbaLqk*#tOaii3m#jO#L86jf0I#(}1y6CI!gcmU*u;2Eb1rQyUxC*DQXdM}~`j4h1=og(=}chrj#S&Gqz|6mb^;8nL| zykNWeCIx6e3G)&yMig~xAy05l!gLk;Dq_eN>%|y41@ha$3Kb}~)iMgO_EuH3E}Jzg z0Z}euft+ps2sTu^pldhvyS+haC3Hnf#2mRm9@M;!3=w)kW%~v4L6uxI{8IIy5E?1n zp^i161lvw+!8SU*v;}7^u3G$iBN;A);SCZX{)9ktYzQI}V!Bf&UqFq1kt|qdR+Uz) zo<>|yPq4kKee?R?UT1%jzVw=E6jrI=dfzM?JdK4FjUwXqnT#x6dk)^6-X1tB?XZ zcua}Z5k&p)UFbyBDOK0pEJZaLQSaG)W(BW$;GB~QR$H*;7cQ-_ZMKuA948zn`F@;! zZf%9g(;Sf6S+_8*HRqGEie50XMJY`jfpLu|6SaRDhXa-f5bEY-=Ol;st&6S7Ury3W zYrX7*n)yBeihV?B?82WEf+fXH7biez8!JQ?^M;zi@Tq8Ki2lU-rnIR382J2ma^lUGb~vWIIk6U5p_3|YfYTq(DP+GO)bXOzYdR!zTGgN z{aB~iH7-AMOb81l*(DA0_}d%WkdY~hdldUknXBQA1tA1wT+eJF5p7LuIeUO=+9aW% zs*}Edh+g%04J|zbZ`r56u7F)7F7}!I8g*A^6j5bFO2Wly5`#Vf)2;xp(ZcPKk!Osm z@oB^5C{#?ADl`68?%;>`=0bP|Z;e6r+vKS6*yNYg9vtjk^jFi8QrPp*QGE%IpPgY! z6! zK(%^vA!c~`uYdVlRX1+HAGtq=ye1%3OG`rB#&7{!$2Y19ZIXUmTtkbAc$2sx?0~*_ zJi|mxSuGoNDzb1h6w?fIDoO@PJfC<>2;6m$ELIXGDGbQ+oBu8KiV3am^uU}Ou44G$ z&B3lm6w}hA5)hYXk(wt_6!B&ozM@=)GdJ3E$bbs^MSR6Hh^!9$yL_2Dr=FXv<;Q@3 zP-!=qQIHdoEB}^qDrC(%7}9J}=*A4uCZ#L24&sO-s3 zo8WQfI>7w)5Gl7OgDmLHd#&%9&qW-n*ENV{#&q+>U8)+=8Aqed=P3Aoj#l*2B{S(C zLoN45|;lM_^&8VN_q~6shvg~ebyNxz%{&7Wm=|OqOJ{ikVFlH^W&q*_tSsbzwMW@fvSYLYd? 
zR@u)9MIjSBZo20$mIAwDS&1CzF@UK754ZA+OUu1V=0bge~V2=2rTs@mW$rw*Q$HyUiQ!V%rQlfmT_FxtJx~; zy%Nk$7re3Vyx-2p#EoUjZ=adyx<`wx7g0FBJIcJ;|46BDj>_#@HTI>?DKtrYxg1#K z3Qq!XhYcN}8Ki2(;7gaHRZDY}2e|_xdXFF)z~IFH_@&JE!aNcxT){(Z!}%ph z+5mM1ZL}TvV-V|ltxs-&t)|{_L`69P3o0ZN`Qe$>k{|@!SjEr#9=WCktb%bW!h8Fc zPcJ%c(bW7utGMZk=`ip1-vS7Q(R&QZJ7sGsvh27O2$anFw&@Q-tUYI_ZX+YdZFDlXRfjex-k;^ z_V-zVjDj_bksN~Jn;mIyqCijf75HGrAbEH-`qBn!Npn^6qsZblFb?T50m((ql}C@N zJIZTqG`UR>m4SwBpn8Q|F^$f@ch&%C<6aQcuH5ZuBjgg8E-=Wig{<0!Pf#iM(5&;x z=+Qtn^}4^Uy&;XTx`VcjZQu)UX8(*7tqADC977uJW8GsoPH{@k(8``B9Y41b?CQfm zKaK`bR;t0=35Apt;6OCVl{?l%yN2ETme5LRuD_G(lH1$Uz9FJX#y`h6(8C6Z>Rmlr zxUkpOGv-Sx@-F&MU&QKq;?u$iT$KJJO-l-5k{v`j$VMhD~z85mi+; z@FE%Y3sMF}5G}e~f067(I!z1+$rF^@m`oiyGpmQLU2%E8h}f+h zgs$llK{#XQUT@#NGf;3PM#;C{qPoZHN5Uo1<3SAa3 zas6F)mRbkxer4s%?9@-MEB1%9$(bT{sat%}0_*i4Zbvj%%YLy?^HTsY?e_dY*S$oa ziz!>Wrq#3}nzc;iV&HkxdQ2o}j`}->T2-xIW-IW05b|j8#wVv{xHt9OP6oHy7X7_u z9uq;#9(0;S`(dwz4)au9c0sU9&44#TZ6}g#Um+(!R7b%-EyS=b#3Yw67*D*tAB<7| zsC|e5e)09ozE{An{fEB*_~jMUQ#+c?g7WIFz}$<#afj|YMCaAwGryL19)%DifBfg_9wtU~ahRzk-0m1_5wO-vYM~FdwY*>lQQHEhnSp@f(GzVzxY$ngYR>)P4j{7c^jhDQM2V2C;)*;|GGBe6G%>x(2i-G7sI0b6r;JO0 z(Xjs=IMLynb6_?}F$Rq|B|yU4N0x$hDs(EUU(GM@mTzXAZWL~wE&dAhaxZ_9co5K+ zmd^4+Vta?UUx(rUQf^Y_-L-r-4(I{@5^J)_Pk`8XCI>S>+0NH?lH#dV^yBmk^MKo z#raL8TzUW(m0G#v6IZysHrU7JA;* zF93r5P5915!&^02MuX4v9w#l6JiAw4R3+AIq*#&bZ68-qDtqafi|_{N7TM>h6?5|i zr6oWG&aA8!;y}I0BN~3*`82=3*|Ef1cW*1eNnHxaSc7qOJ>XXD-Ql5A*aAT9e@T|^ zv~PSMBZ-|);XVBs3`p#mXzaJ3ny;+h+ZU8`T|)=Ei!TM$P;(rTpWmXNkw;&^oxVUV zThWb77K11mB5{{k!_a$?leWIcVZ^8z#QyytM-STX29G>i(%osr)#^pZ0?E^|limhy z12JLKF}+8~$1gG(?`A)mp@CzWSlg)*&Gc|!H;fqlMp4+CI?Qk~!f=9pj!w5jLc&WP z_v9z@+%(50+m=+d$a#(;?#ZIXP`BfNV)%`XS%G1|oBs81b=~eVqQQkR?!nUU1K>*J zt|j>ri2FmEoVFmKs!2?ouJyTw)=iPz4Qh@NTH3N0yrROi`^*+K1T4}|e2>DmPzimQ zI`CEAe-?8GbX~#XBmajPZjbU<2U>TGHrvJ5y`T$M?*Y{Xj0!)5FC1p=LwEDbmodq2 z-~V##fQj$O#srMOF+s}Hu|y%W#IoOo4IHrcQKFyu;clTvE}>Qd*9b0>5qud`_CHjN z|9yLU=lcCDk(>W(0uXzxWaePj$C!VHn9U&Z1^e|?tJ(p#%SGMDoEwbT;E~XPQ!*x8 zq_bH-KVzqN_}XLbb9Z6+vHyX~pd0{(f9;~^6Chgrg7xyraPTJB`vfdK=N#i}=2&Y@ zvp*sQy;t|&FD6F}=Y*?ZyC?8|*iw+k3tmW*w^^W0oQ{{g0?es-zvRe*`+nTX5}(BU zz3LCEZ>EXe47o+=gZC-T z0?#ox+0n&N)AW33e3l?la7CQy^=)2({)C=+te%*DHYS|;SEybO(roeGXAeHvYs+2_ zD==yS>h~DnpAYsg&_-VafiH0t{}U0{g+<0HT#1fyZ%zRtL)Kn4QG}Ak0@`$9P*3eF zUcNk0_HuSmM(y=)o(XEHC3$7i5>;hm8%@ESE9IM@q(l4G^tG%Qkji9aNA!pzWB{xulFpk7F0p6gZEqU~N_{r5f>wI{uBPDqbmZ3SvY__QEj zGSa963=zRF(WR@nBgAxb0blJfp^H9bmNdaHPBL3?LTBQzm{z9a478@$!!Ardtu1JR zv%kIQVW?tbY^fLkzVy$8rBKnaqj4=`SUr^qM@UK>Sui__0GuWG;kZ66?k+@bgK5I; z-p?yQdhZg15}hQabLUf#DqZlPNOq8b=-lfCPaDm8Y#3b0|*nX+C+= zuJ*fiUIFcKP5As{#t@m{Yl_q|KmfuRYrh2i0y5JaoZtu)K-VXI4FupoF4k4Ta0JI* z!5kOUWR477jXAaW!rZ{_+T~Nf3c`PC&d>p<&>C?b^xTr^16QN2i z-k5;y3F_02iX3nbXV_sL$j@?a@$+Jr5EBNZ-Q^G7=^3AXB0bvl{BN&a3-InllaD?- zix+$;wGs^9`+KjzOlRB54oz`Gii>h`o%*Jm#^j%0sFBq_u{=sWp@WwEFTeul z!gns|br7ozR|?j%z62bGc58hKe%r%syLqeIJ#z-|>5=f%@Y}r(2w2X~7oOBrSc)$` zCLfBXdr7;yXDa$`SUPl zdI4zabwp?874^6 zyjXQpc0cQDH$0oeHjvZ3?4(v^(%BSLb-urk%tGWSQ>jgg2zDC1BmmxwJ2TA7qXiWX z@P{^};)ba;^tksk&J@B0maR-TeM;F%`Qz9Qzs?pQ^XG3d_HuC*Q&Qb)#|ZUx;CWs1 zaBm-#=W*o`xVTnA(CQ6TRMQ<+{+8D1k#Fjn-yIV8qVYNUnl0UJT5~MT-+~_A4)n1` zNgD+^#_mf-QtqaH8vwMl?QM;;R+H%Y`2N+M9}1JK25*+*(U1E*OQ)Cp$=VZ!hf5fI z>@Am8rq09X@&BcI{G3TRX5)mThS|MrA76U+{{HO@`U_{Z+Dtnx3pm5dfasj zkG(=Lh0BVo<~%SA#ilAKP}YHNbw5!5l~W&8$zkd%fu)|EErF}hs33vaD&Qm$Fz7l& z?fl?6+$fpD>4>l2xZuVNTr}s_VmidI`W11xo@SL6Mry5T2LnT$R;t zZ-QT+1|LJG@Njhhz)q|(!iYOm*Hks)QI0r0m>3YiUy4iw^1&e`z84PSn;|AA_%}>4 z7NPM)v9k*QB;?0qftO5G?tYteRZ{78sW3NeHeC=z4-*MRPqI=-4gS4K 
z5cjr6cLV?x{V&al+aS<#+rxJYG~9X>6@kK??Ve0U&N99EQ%CcJujD9j;nz=DJfg|{}3>y9*>Zv^h zf7)Hbb&-Ump1D9>0&fl$n9xMe*4 zId8{3*d+82_F&5%h#e_sqVKJ+cYf2L=#OXmT&QVtDm~LI3#O#q`la}w@M3fcWAq5+ zZ@mM-eiHYTmW>@Y>=ADvoV=Aa&f3BrE%_N>{N%xd=)RQa#qi=PRZ1ki;7RgwM`42^ zPQV|rp(D1z_8MVrnuNMm6QbM!xI_4wyq*IzwJJYMQd8%Zpfb6h!yrc}0=HDMQ`34l z4j5vtm`4AM^(CGgSXTr#X9TwI&JurG3xgNo&zPt%4Tq!r?X#y0_)M&6G|L>O>YCt< zYd$X?*BXf0k~$|fxl^@66wUAl(PWj{_yChJ`1z!w?=?jf2e&-NW5;r`K0Rv2bE)%b zcd3WyrLsY+ERp@>IQPt?Iif%RdC{m7>0jA{O|4`IH9M4s(%ayDm)k%(Ldes55q{2K?YF)T`OIOjngj* zYc}T(5&$xKL3=e)xyEAE>cJQtinc}aaiVhc#UkaQ|9kv{M9 zrDdqZ;vnT9NK8|mlGKo6b^&-mDAz^g!*tj4LU!LT?0#VSBYW%aBm09WOIM0~|LDJ$ z6Y3(u`e@WUWDyz;ODjrXkdBjno<(+2W{@#?(o3}3DtTHR+)iSC)~2aD58t{LyySvK zQ)}4TdNV?$o}=1AyJ&HvXla5HajsCCV^Hh%hx$0*8N)LB;Pt>>Ct;}z{x_Q@3+|mS z20w<3vO|8|#c_4Mq;z<^BISjtlcs!p*$qnh(&p3cV?ohU8$D^D_OHxhS)*LJ_1XwJ?cL&S#wyX)s+;oFon}`dHv!XIh7kP6cxVFckyqD$bcgT&e~_KRNk7XA*S$ za2ml-pKoyp-wwmVQrU)?=dXjdFwz>ca4og^V#c-njOv5{V&o=bkqX5`R>Qw^+hheR zqN(HA*Mexylwq;wpQ1Hwn2!hMdpS-!DZBNQ?GB3(16f1;or`+EqXt7_PksNL-%@j;hiBaWTIWFi@L zUk9I6KkaI#eY8A3?fCJ^n$;OYf-cI--1Gg3;xg_Kn6L3r;Wmq zeV>M{LydEii?+~;yGnXiUO^Ltm)(V-_YK>DHiCFbg^}<@lkgRf7CwyP77WAQ?=vUG zNI*d9*}9JV>*O00fFvBftu#QrX$L{yQgu;KVOLoK$fxCbLjRrI@kW$9=))u(2q8oD zd82rSl{1x^GV; z4Sk`(a>jwJRVfM?h@GZp$l%EyEeV zZr@k{l+!t&wfAc3L-&~lgj-PiFY;j*YI81OgyIm`)}ZUly8 z@$XAmDCW-)?9v1T3ESM+2k3;+xeCKK3d3R{F{;B9+(kVsNe7e!iBe&9C=^5S@?Iq- zsgzGviLhefGn#Rkq>_}msNZXXr40NWzXinstlr-caB>r(8>I~2SG@v#H$iAZJpo#% zC{kQuTZ7;v*HxrLi|(zNs8THu*m@l(1>K^=|MBQ@FG>o#xj$bzz5Eg$(#!G{6zBC0 z)ysSx(t9V3^&b>`Ax}NYhOECG^T&ES@#ZY%Ie{Bd}w7qOM zYJMdX{K|Ouy4zsteBC~oKK~lqmvqhTMQgDvUczSXHq&;^{k zxFxkeja7$y`QA1QroJq{T@iX%ce?G}bmde5v8Qj(@3fZJE!qYy0Q&K|%H?n1_~+c> z2|aq1?@Si)-ldX@Yugc;yIUtoKcm9*$e;50JGSjpe#RK5qpiJ5a^?$etc{BoyhaUv zy1w$JSubYT*HbR{{BkZ|-^a9J3)TMW?&p+)gEZ}Y@FuayX_c;NTkbZi6^!fBFozAP z2)B9z&5B~$+;Q6<*KC~uT9pL=K8Eo<=Kg%tf0d=ybf@-sWzg6J@|7kvrF6GP3X?=Y zMGNC;3~T0k%p3ywEKXIC#x=x$|mPD!&LiW zRj>;cHKBpMdV85RXJUImN7{a90&YZYw83(jK4r-lVX$gg-*0wrT`*o z^~ttm75oVGSwd>4lg@WQG!EP#{ANxe+AL$A$%wxTJ~O|<9cFm)vn%%^3Yhh zP<9mU(4!4avGgqbHhJ-V*J-|&)q%YMQ#i8t?OiLug`nd!hKcAE))|{qf|x^x z&7M#{S$CPe0H1t0b%K6SN{QJKz!EHk2S+wxhA-i-=Q=sZ_Oe^xUtx*`-X9-tl?d-S z-ee8Hhp^}}I+-CWF45>51YEFMY^((tuYn@&a6&>iJ+QNMO3hxd9AFiS3RdUet7mTG!|Cm1%qQB%>5RI=$gvf1*K_|(5IBwGDq8yO~-Xv%^PzsW8r{#PbX(7>`fa(NgmP(;gYoHa{&EtxapZiI7n)tByo~q zRrN*iYh<_-q(-QkEjXM^Gz7V#ss#m{3(;+hlwuW>SZUuj>h;vUBJ9JJ(uKa)4Hyte zoO7fY_M~>sw)^c$shA+BYz0tIQ%FclaevcOg3L)#UJNUM_uY?^G(}O`f+(OuC8tEy z_$Cz%Nh}pW9ID3J93TCR8{WC$Oz25U89$P{!oT2-G8!ow@I;X-tR9OGpwt8IwG##0 zDFsEMVJWhyo!t<%IpD=q6SvQ3LC2y#Br!r_#DDH11SqS-;pCbY-`>bHr| zlLL7W4l6Q8ST)EZrzOM%hPzRAeN=S`V|j$MO{Bm&*X{w@1H-gr#A^XfG{!u zUAEDwJP4I_rK@#{mSxA6!KeTakp&_`Wkgcq|2m7{@66+Lv6{h+KE7BiNmN0BLz0N6 ze!}RfngO1z*g9gn3k@=FP&s|HkO)~s;@9r5(zFU90)N8zpVg{c`m|R|T11>Ak|^eQ z3&trH3x&Sx`L)ap$`GjVJ*UgyLHFQNDAlP+*2| zXmbC-fLXd1*C}ZPYXen9r1X|4l6624vp;{Nu7IypVxIq5p@S0$6-PKu+~km{#^eNb zBl`t_HFWhUV6KtmJjwt9!W0c?jtWk4klqk}&6?;ADM63EPUPbbV1!iLf8GhOl2TWo zbL|#$?RtWH;x5E$IC->AO|^WJw_tgLhW~vmdCZY}BroC$y|od%2J~A(p)Pd#EG4K^ zmrj_wn(Gcl$2;GkFf2?_Dful41tgK0kXUyd32`H}WtHFG)!qO4c&DRdcqh_n z2Tmugy~RsOde_-~{*0&sP4bo;GA;NV63M!^(m4nAjBzr8 zm4%BBa*oCW%O@VThmi? 
z_pe)Ef2aCFco}%Pb1?MsIoGv)yw%;R{c<#V>Z?rATwl98m1GK;Zd`f$T zaYW2yIH%_PbFX`I#q}Y~vLNPM)h8+~4MhdWcQ5(~R~K>ju~BqfK56adJBs4}SbMIe zf9taO?|3yhg>m!sRk8YgmSCbmUVZ@ioNMycj+kJ}>+rc8_;UQcU+z&NU=Bt$31}CLEGHSdFP3Ihd?XHF1={9Z@)s_kj;id-3l$z z%=L2}HA}eGv*0A#yz*-YNj?!qYbRzdd*if=+~^#}cWf)%>v-R0_*n&HHaLFu&5F^m z?$s1Bo!0;6U7MXpqvV{Lnm(E5K=1kD!mBoxU(m2N4sA0}oU9NScX3s~clP4{i7cQ< zb=S1PldC;1QGB&B*5e9!3?j~Nel4;P*uQm@J$>(dy2(MkE6{%_dIIRJhMH;$W^tvVbBgd` zME?zzgoBBPm<&PEUZMJ#FgbT9lrArg-ZwfeT`%p%#UMS(DD8IVJV(WDwI)9oyUB}} zLE65tbj0U!^s)Z5apil}6$krZZv_;Ucw+wuxX4&@mWjxln8y$|=T?svUksz8c8-WQ zliY8pkZ}I*#nzo7N0y?VQks%Zd{@Sco24&mq*cm|41e^1O?tJAiDB|EkM4f_w51|F zktMRCEJBB!K%T<9v}K#I^f&WNgc%2}e2mtA`DJy(Wxx0Kf+({S^G{gsk)nZXQJE5N z;|=C#n(dmB(y$+TWTVnN)ROy_-VNLWbi>1Bd#epRRZ?{vX<<8K@qJ@`d6sg@&h%c= zlB%rukro{JeED=vJQ-2PsE0dMUfj>L{T)(d{1sZpd>Kk}l&G!>3RXT;&41mHXzKiM zw6rJ9Iha+*vcD5)B&kuPjT{0MJmPN)o>0_j6f+fT>OAEX!v`ZCYVu;H(mXc3R@!H& zd@Pw#vKsfqTjbgy`J)xKLs7D>4J{P9h@@r>n1kHTeCZS8gD(vf{U+LU4CfkB8A#N> zDU!6u9IWI3aJ7usS?ade4zoN3GMB9Xnr(L zrb~TJQ617ZJWCvoqlFGzC?eJWerV<$&a>?&xKA40&{t&MV8Dy;}aY~aHA8y!G@sJaDMw9I%>zQ43WNEgIQSyOFH^MawV-} z?dR{2Y%!GxN%c~isYx~;@)=GC$Nhemf%lo5m&BlW1!#-?!*%wSj&6P*gk!}b80pH zqh4^N0*}3d00X%lCQDF{e13p`-o3O6Akh}NILqJjto?hbJrT?A!PC+z`| z->U3ctNyHSfd96zlRQ|$6U-IwiQ8M0!UmaFc0-preZL-d`xG*O&$S+0#hv}+# zw-P)u(hv?r^<=AE6xvb^0ukd5(RKA1?uh20HZj2S1_VgMhELU0GTI{l2oAVFWHVxO zt552d(*p+_c|w`59fMP$jxk8xhGft28grNj&g#6h+H=bN7@#1^GfBMb;?Xk)teRA8 zp^P)Pe{**XOlZL_ALg#Yr}nQT$6Xf)`C02i^n3BmFjYc~< z$RK5Hqfp_#r+V5GN!Zz7-7`w&j`NA2g*K7y)$sygMwCMiAw(;Im5|NOy?g)tamdan zH?f`M$KVaMD3|dxh)l{lJ1Qgnb7}+fY_ZOj-!40f?_YoSSqD$gnnqWgu`&s;*nfoF zg$hLw3ZMsYD^8L8yh+dZVD>qD?Q?73(^p89rjY%g+FFAZ{+6jIeKVE#(J3U1uS%jC z)QJtS6x1}sjObCUtg(Og@|u}p3uPYdS>vifpjek=Esk0dhQdb1>ihKNt->jupr3-R z;t4^=F<7t{{)nTXp3^!w(gW*NhuxZN-F94+687l%tP5ydnO`D02Mn`@5aVl4^9c<;<*&>e&S$isK`!W!giiF8!7if`<y@s=wDTR@TEff*%(ybg2ILNm zM=>Xh`s}1$Irr7LY@hQigmImRUQR%CI>rN^u`huqCogkqQe!IeeTC6yC} zV^1cHu*hIh<=am(GEffUR)02C)dho)IA2WM?q^Om#raP1n+Vp_T=?-v2N9t`Idrv0 zHTt0T!^5)2f4!|qHLf+)MM=zOJ)?{O*HLI&c)VrSYzDErHWdE7=#RfTt@3ib=*Lga z8~%%+s$Z7{BbASiXRynQ!*+jG7YRXhn`qnhL{+?-SfOwOoIRDbNdMVbxgiO%rjgN*u(v0lktaBkb!3rnbg;e>_S&XaB0_e83M^PM0U>wM9kFGgt<``b zW1i1{_;nS2ddmNg!xB#axClvC8XnZh@+8<9Y?x~OHkI99Zj--Lt}AYiSzzp9^#$zC zum#5?BGjYS%dyy3%yWW5MHCn?CkwsYMo*N!VD<3n!c?7(Llh-tB=2X~I5Y0>VkYM* zxrRa)U~j%vgx(K0J`aAkQG?+?RwT@K(fSMK5PI0PO4uZ``orh+r9-%RlPyHz;BVHC zebT}Lua-^uF0g$Rs)(`jb&R_YXg{gZpe5Kru4LYR4H#CC{MKcUHC&q}JK9|2>n3R0 zT+e1g`k>zMjJ5{(auNv97V=4Gp<0iA82$9@BLuf)#B>E}+?l$)nHfZ2*7heTYFl)y z)8kHbmgFPL%v#8F<|z~r8TQd%do$pR+5sV!YlXJt#x1JIn)@dZP!x-7D}kUIt<9sC zq9gB+>NNa-8;T0LWks=F8*=1##7lNE#@|_*x78}g@#1E0Ct?8^#?D#A_1uOy^nozl zan;w+t_hq1pi z{gH*wujX}OCLU-*R3oaTTIO~oct|^y9@)ofc-Vb~6^X=lQN_PQf^nRo$xV22e@W9f zw50x`Vovm~_(Bh}_%sIa|J)K}u=q^D=#xG<41H~QD%f!(pJX?plSl3|h{1;WN`J5}V8tM^f;i2nn359|zlv+8jXpAHQMNPQ5)L`?? 
zAi#grLXZn~rU?(owd2rBtWX7R@Pd57jzNC9EDIT6M&IlcGNhsA`|BCl+mU{4f38H2 z`YXf*|FVH`DKdU(W=1Z?C53a0%b{5)w=xhj^Ap$y**6DIUN_Nlr^2H(t-{G-ZaZmy z=Nd@#8gPTg4n)Alp!37+eZsADk&oo-f-*+pVhcu~R>1*kg}BZI_V?UO;`6pyA*NZl zgTX}~jut@gbF)cfJ;k&zMJRX>lZgL{x{>zS!u6+aa1AOxnSmM>sSe8*@Rk9+ZpV)V zmd>ufh#KfDsVwhu8JM57Oa2~rFBwI?dSf>XdI_#^9lP;2P`+js&e$(}>=@X8{L(;Y zj}a(_N?!)HokJiRG>`8zMQj#7@~0QuI|h6>;?B`GosP^z@zadaiJJX7NH0;hqPSZtAjwb*rmxY359o zty@Jpp#Od@%EkLK(`ag4yX~2FjFwZ=^9t_d{E{Wh{IzMY&0}?v->1{{uFS=oj@y&| z$c-xn_;_0PNUA-xK4RD0a&mQYn%I@;TKBHXyO{G5k^Y<;2z-tlI(wukUb&xyn+4w5 z?)ETbloWNiY(Jl?Xf3Tn2%i3}KmkHJPC=OspY}U-`<)onPUJ&4T^}Q~Uv>ygujbA7 z^SrL=fU1Z44pdLq5v@ zlr^Djeq!5@xT)!Jn zFus(y8119!cxsbar10i1LDiB6xm#4D)E}Xt>TXy_mlW1rkCDXzf_I-}6?V z9wyg`GzB?%uIm`jKfSi~&CZfcw%9(#vnD=WduC7OMxRLuV#+Vwqc&C?uO{Xz%=Y&f zKkr#^IBpS}Y)0RZ5ZixbQy7>RfG6_NTe5Nxlk+tTzvz)iZ^Klc2DA48t)jHcp*C?81Gx)4b;>UAjR0H%;$fZBb84;(+ z6y%;jNR!Fm{2|YYlr#R5C~AP1ftgnx;>ekjWEj%=@OnuGZV)TD!v3wTm4gUeE6vwH z(Jic~WngZ#{nzZ7mD&BX$?qBs@b{R?Oxa6JC5G7HkO@2B^eXh}%=VHui(9k4U}b6N z`eJeVgeM|O4qaz&R9KhbA;lpio>Lv-Pn~vXtsM8LP+8st_FZ|b%H9p;z8SWXynS4J zNNHb)W+gfLL?ngO>ePai8uO;(#-}*bsQHh$b^Nj24axWas2|DK6Z(}64g&I)bWaIN z0O$nc2;pX~h}E{_cdWf#BM9nQ)T8x`9UvZN*weMNF-cvfhPl+#cM_*iWVC<+}B)tPB3T)<1~a2z04xzcr93poNKN9^lQ~?r1JS8sKv-8i&sIE znuy`YZq|BUa?RjX7J-?&7juf~#Zk;ceKIcTNo_K0mB%SROIj+G3ohaE+FzHf2cXRp zVEHLYkb-n`u9ZOyR`ennK=X?Q^AHLAAx18SzAlHSzf=x*3WdO#tM!*43~2g+Ng@QP zi`s2NB&V-N9smv_7n6YO4jFN9#r1fk#fDq=BQX^8r;1<;D$PiUD#)jgppe=Jh_;(z zG(|2dsRZl~Blt&K;*_hiSTgkB2;e=I?WSL#@1H>+l!(hVDfa}1q%^Lb?{ff}P+(Bh zOeO)1lAP)w!`lm$?-tzkFx6k4UA9tDr6&#oaXKo!_-*nQpGG|OJt$>-c(?X$LI(f0 zV9Y|yU+~dCM{UQaBtEciz0-g85I(*9(cw9U8yMSsdBcIkH1^WId2N?^wE$o`g_Cvr zY@gOG-~pWvRSPC;Y_^vI0```-+;(pZG0r$Ea;VXr5`{XIs!4p$LfntH?Ik~S+!ht@ zv91)=ttl%j(ygt~%EQf8jOSY0-8AUucDB}srHkiaj?ndup4XYQ$2itY35+byhrIXG zDxl_3?|rVrl%?Fdebp-u2PofAE#7o}?%3raC9G6zosghiSLY5P@SpT-a7OkKw|!@! 
[binary patch data elided: base85-encoded git binary deltas (including a "delta 100712" section); the encoded stream is not human-readable and cannot be reconstructed in this excerpt]
zs|?=`Mn-_IQS{Zf)vvjX#`(uduhy4-(d{%RO372-_p`q@f*KErDW}7EBj{{j3Fvtl zm4fCh72^JA1=fi-Z4V+gt@Mj1NB!be_Q>9j{OBxiyoRP~n%o<}$Irc+L!>?I*ha}# zBRZuBTe^sf|1OTb?l!w8r?on3zy9uYby%H3NH0}%xuA076S$C8wfdu61O2n?f{^pW ztEa5)`C-+!*)o1Zh>eAXy|8qvdhEtBvZuP6oqxvl09S6p?RMMZnv!aVtX z#Ov;ME~UR5nK`w`_{{LrX#4j4_e!c9775zUY9SV^+&XRr+IG<526YX`$A$|76Vr)_ zV%7_S5M8E5U6%Rb@^o-&Kl^1$ zaqi7#&0wxnf$^|IZ-v6XJmt(~e6dsm2u$Z>?B*+T-f2W$O{*mgPHw1F19hKQjwo*- z88wO(g|Pzcs!x|dV5yo4Nwv%R^Dz){Y_WC1*@ZiQP0Kh|p(Gh}PhXCFG4c=n&;8ii zEe&Hyz!DItBRl|P9*bNOKTFu)#P4#GG@2`JaMmTt`HdFc`Nt=j$(Z!Vg+<3LhK=jc ztAAAEHPpXRVSf3p*VSEW{n1?L)W=zCPE<>?Gz!e*$FDQk`l}0Vd4^Qhw>mSvom3Li zNLVPCs`9D3OS{jtXK1*sxN2e2H|E>6n05#X2iiBbSKwT{to6s$ z6*DyroZliC5dRG8GC!pi>}l|peZQzQv+Gp-&Q``M%b3PUGqu2=YJoXkONYnfuFAGR z&|%`zM>7eevscL?937BCfUV(ssCYX0z9eZtc>Lt+#76E8E-diLWQ^5NvC$6GUHWc)Q6w|{; zuAM@;yu({TBVlM%NGl?c2NO%`wYdi6?V8czJ`@xOdhtk*NMcYn)S)rQ4idrWfSK{c z?hye?dLVb2&r+S8ObEJ=t5z~PH9LJ1X``mBLwS4R-ng@c^x!mMbmCZ*LJUj=iGwM( zo}@0b*f$a4H=-b5&P*5=Vf!HA`yhu}8CiQ(i8v6i2ANq$J<(9LWX{cJRd0I#ChL0Q z{n!St_uLV_!rGv$=nzLfNoJxP4Kzie@S8w5uF+{ASPK%jk#`iPbhzP=>8Zo@Ot|+J zGcxNK3^55#6$6$0xbnJDRXSp4Hv17;G}|6kt+ATVH%n&GxOLF7=kJAeMSmsJrD{iIX&Ra z%yt$n-TDG-vP;(0(PTi7xlMdw16F*&nDZnkPJv*ox@Mbuc@s7DJfS1O_CI>0sneYq0W|T zBL8=Sy@(ji+Wa(U7__i>YRcX=GYh~-Nfq+5aK&TWFK3tnBXeIg@bRSSv!r9kS zAkva!wz3sT(W^BwhJ!=Q03xIhg``;COsJ$0I6#CU`;4FTSjmhJO6Gw-$PcKtcjYL| zC=yo_f0zQ6Jv( zV-1o36ylGm;03RMH$)^KkGBcmYcT1WvR{zaG%?n9XI?#6jy;+vvp`JW?^6MPaOp4G zw@+4CYsl9?OEHlV8UIl7&kiLr^gonuRn!;9u!$S2{#DoswyT zrvxJbZ=r+5p&N0SbI!cRL;SZ{AoM|bnIgRS4fIeX2o(;J>XR~Jd?PCHKvL%)Az zXY%=M5BXwkSUYoXuLC^}K6k@;7xq5pk5}aGn@2B?KW~u<<8Wp_1QI^|-wy-Sg!~@X zrqVv&-fXFDR@H=X-|FO({ub$ptZMUQ)aRN2h;hjiouuWXh&p2$RR240f` zci>vpX+@uJ=cXO7CTST%o!Fhqn8nN&1G?^Us?d<5X ze7h`>5qaXnSmIBY`1Z$9pA?S;aR_Fo3~@jEe4!0G*a6^!0C?{BS6U`D94m7Lx$Xrw zGH(I8$zg9-K7m^8>#n#u`iXs;^$(vluz{TyYA;c1M+->VD*P{#x77&~aK8<#smd{kZ;mR)9_Xo^%qmSQeb*HY2b7|`2)yN1!9>kaZIajzq2mVL1Rd|(N=ev72P9#wi zDGP6nwhg0C1aH-e1u1gc6^))o*+jYTNL;_Wz)R34d;7M5AmTc*xg9j}UeAADJV=;! zOBBBLJxv54AUOYT)(RLpIl8zRnmXD$eWgiU{x7n?od@0x{ohYEyk)$x0E@jO`?}}Y zBm^i@U%RL!Cp6$ylu(Lddn@kZJb^Ov6e?=#m9q`LTLieWQ`e0N!2fZ5v!my6*rvBp)J^O>?}m3V(k*Ccj~8PM?*0ZCJT7LGrRjks0U+9sn5OQVPp zBQAwDm(o4f-sD32+rdtBn45&rm0xo~FG)5j)1=sDrW}IBoJ_Xr(pL>Rs)}j2y9@_= zEB5Rbig^_ZuE!xXl_Uqmvw>#qHm=%(@?$sFoFenyLoodajyjqQ`TbDezA7I7F)#Kh zpPPnMpsW*u3Ta?9Q zVebXC$mT(`q$g>0%0$mz2%VQ+5_oaFe=O;-042A;+ZGK&i_~9NOxIAHXLPWhhhenN zR0L~Uj=~JK4z%IQpO-_lS`UxDjL+Xy}V-6R2|g!)69?^25(4F{6NA{j*@daMwp zKuC*d^Wa}HB~1zuhfQfP8aV;3@SgTmMukT|0q{nw+A=1RUsXBB&px=IH!p>d{0aDn zz5|&CiiJ{4?sR=ilTEhVVt@ZxBZzn)GoZ?9?6Qc7&D`8`+va1VQE81An?`-$%a|u1 z!g*2DnBXx7NOOeKZWWBc#$J0u4CgQ(0FA*FPPK!wkkQ4FjFh9?KXp{L{jf4rSVzc;qf$(T~YCQv*5X0X-o=qhjKe z_u}IoFO0=@28%*3dq#e*)kZaS4A^_E{@$}M`@!^WQZd}t1CEgyWeAS&rfg01VjgWs z^G!^5jOU?gJBZ-8ctVj3p`{}pCb?T~4YykQ?DRRgkoz|+sen}|Nj|C?JDBg7Co+l? znXfzM$PJ2(Zbk3SgI>^?`Lk$xs(W3hwvvU@GdiJ1|45MeV5CipKILjo(-jBXkunb+~hAi&cW2%zEzmRN}Df6ch8UIcq zGJ3`~ajz5Srb=?kH&3XC7J4IqA|yKUnEl2;c7E#1{ZZJD|NAnR#;w+U{sY(Bes&G>yrP#z65pi=qG6b_}MD zB!r&D3duwr4l-$9j)mDA1vYK)hV2$|RA1U@icH?(TH#>7)8POi9QZEGM{H&JI!eHj zhkEx{j$j;w*J<4>}4tm%O3|+IHVKAMh1uX=(Fq#qs zL$4Qs5}lt^HMx_(;EUrV3S0iCFHj;;0tJ^VE>^cmvrj%DHlnRdu4}Mnq~AI&5iZqp z5j~BFhUC1-qn8a00%&D2^G-`>dzZapFQzdBC9~pQ$5Heg3+0eF2#S1OWKp$tPl2Qt z`afKqQ*x zqArhGS6;Jl7#u;75rB&Ogl{w3-GcBNZKGN!WJiYBrL0-rGSx^7F-E+99Zi&TQYu?NfYcRM1< zV^%W%$d%C%0|=vt_Z3&I_1g9z@}7|YR&E~utHcbOhL-&A^pijR7OfNWGQF3GTKD&K zZX#TIQk7a$x?BNYS)pN&xC2nlwv9DogqDw!WP6J)Bx(QGF6}@qH@CcJnhfMA2mTbe zV`|j7sw$#$3pfUnkavp=z&snuBc({7uqs0=)kDhgRaZ=y#uO3}^pX+Ddi%EGC-H7? 
zD1u3)Sre-~dmOjCX@~MjG~aSinUW6G={dFo{|!FeneMVxlavPLo5AzQU$9yba%x?sbxYQr}Y za85bKBUf+y7`N~f7oLOxyRU3R$7=_qmb*G^1S{2Fb+vDdnb{yx`6aKJ4XtX-&BX+>C~2yMwVIyRn% zGf@vhbG-f*&&9#x|YsN~y zQYqx>E6}lB9-{;oHY|2|E`r2O9$|=BV=nA@4nP}Y#l(7G1UV}B5<=;=asJ-wp)i}D z2gkXO)deZgGM9BPt0}rDU`E=F*kGrDVUd6 zk0*TwX&!KIY&MFtTm(xfhRq4X^<<>js2H#U5#mJ^Y`9DI6$YDkU3eGmh`V1yiEPp* z6mX>e7h%QY`u^1MgL+9ijz0TmJ4LsorM!|}8q3a!d05k!$T2e?p)>+i(Ou&ul)0|A zsr>d-;)qg`0^|-_pkCjVm_bjO&sV=fI%C%Gza}bOdcSivBC5S$C@P1{p`zhnNzkW_ z{s{Nda7O5mF`|6nl~cDBzr}i&cAtqfLIN(Y!;DVdB-?~AogMeaq{&R=*?q^KS)jU_ za9zlqL9huaTfXIX=uTd(zD+CSS1)5!i)B;-rR~^j4+k)I=~kGTcFpN%wV9_ZFatM{ zsY9V{%GX!`;>p!NsEIuW{^~gJLQ%UJ8)vrKt~zTyDKYG z;#(I+@#_~ubCQ6`6@{fZD>K=|2t;3=>y}{kunUp#ebxf?Pb4_ouv3Rr}48vaN_h5NpB8SDbQS&3M81k|;<~ zJ&*wgSDrIaeK3^KyF2s-a03eYTk-Trun`mp3Lz{3y=(^KqyDDKn>@@^2$YxF2>D*7 zNf`(T`2x2z$i0agcZjuS>sWfw`Dwf8Nu}>_OF!FjgPF#H2NGbeov|bVFR`S`FBQOJ-$ZysJGia-Q+^P{y0=?_>A@3ynpR_bAR?b(mm_?Yvl4=N2-^$eqXm( zd`mjc^Qo~F1b)7;xX|_aoA(=Vv;Rj;&n<;`tYhBUk1*|e9(VIm=p*E9Ml8hVbuzHn zVMxZ56t1Wu9@&oz82y-EywF|exaRi!j3%o5HyGe~AF0SvF5Efq`pD+EPCG*7T=cF_ ze9ZwF`@XGF>-qTnq804YFF8tXT2!f4VAlHp>BPrp0HB)DtZSn48*9M(P)}&>)4*Cc zyP?+POiReNDvRiC{;o#{^VL-Fe*SoIpY-DS!flDrR=@}#S-V}6NyJ121V5e#vR&Lk znb|`q_uquv4IHisU8N?eT1nSt8qL)n{fbGo?C)j93I*_`n(y=K-)f=~`3NsPFV=&8 zYDM$9e|r8ac#nwB9|32KMI)H+XUU|199yP<>shL`%Xu5+)|o~7PnR28Q$|))#*7EM z+V@4<3r3)tvGtM5mGt%dPMVbIpuFxwQit7Exo4v-xARdJHIsZ%_R~yFJk!!IWL%)p z>wN2=EN!Z@rq%oSM~AA->!8dSW3#T;)#%fvEp;dF%W#t}qb)DaHLF@Cr3z$;dj1-@=;L`A{pq&*3UrnW>*&tKGojl4 zT|Ml&8WH+xfvP4wpzw0(eyTF_pbS*4-%*=*J$8>QK5Jd9I9SapYuX#SdTn0k)gF}z z9s|#LHb=6#XDSD+eU6Aijy~GD+Kr!YsE5Ct?%N-a89p~No0*#4iwBx)1+F2whAiLB z2Y>~QHbhZ$*LDHh!!qbb@8@;#(!!^eU)i0v!{L7*9Y6B-H+wIU3lKPyZLHfLgI@Yw zlLVJvw&q~nv&-+oK*te^6tqzkJKy_g4CblZSNzBT0iyh8NMiLR?H5(+_=) zGOqG2*)6xC7jqk+|qIb-qb?qhGLvKuH^bL!u-w2S<^=1L_zdqo5zrVYshebS2}H_((84aw`g zNw^+ren2CQSCcZ{t6NO38)Lnar=~u;vM#?RL8w@bGJn<*=oOv@@ zPx~=DV6%HDlaz_c{nqJZJEw~Qywga=$%AcHEAHFdRAX@HI*r&}ww>te@Rc2R2O+#e zJY}jGy+D7;!lpU|idQB(QSDDZL*IY~vxFKBTQjXWj8h9sR3w%;z~Uf{pe47F zTt%y{*{8-eJap^$5~<;@b%%TC$XWDns+Pt|@>wmuk~MK48{g2u>zDH{!TwaX8UtrC zTzQ(YR|88o)n=M@DzA8(oa?<&w0srYY2O+T<_y*R1%}fS0)?XB1@WR*t_ozT^7}2> zvIeJum+ZG2^}?dP09oZKir^#a+y99HVVn+0SNsD+gCT%{k${0E>komG0?w-cf4hlC zaH#XJVQSKK)e>|+R^vUg&hRZzCxiE^&QcO5gC4=fiVv}#;g-AWQT-ZPxGDlBh9Vaf zW{LCx)5Enwaha$W7PL4zhDwBauA%QSyC4D}oa>bKGSq*&nmh`QeiD}bD$sTGD)5@g z=ix;}{`tDHK90!-L}^Hcz7mVl2HyP_mUMUzwL-7^{D?wwTcFC)1fPGat-bD%Cv1w=I?A=!=&Y=Tkc2 zq$=X%$hi8%=}sPJz0WE=Eu_toSAa#HqnWO2JwG1I>ymB&Jjzl0-r4GTvBT3gRLfNV z{zg#EqUGPdmMOA<0$ZsMhbcUO9q~dcJCUdCMpNg)2-pg6z4U(HG??!=&(BXC~5;IDIP@ME+{nRWqV-BIO#W9+fZwkhM^tTXuzU zXRAgxv~*2?T{8a;Kz`wXM&3XOfyS{d5Zp$|9YJNYfq4;yaS=)8-(R*LuTcJM6h}rE z+6GahYbR<}PHId|J7N%>Fvdk?mJhzpY!~87XN^2RqO-9t>B~p& zD9SDk7@H|4=BzJF#t6<{#Ui3=!U}A6^zW&*i88s3#gy7A5%sdEhs%X^%z~qRDWTPX z->DEcsf!yguZyFVP$5z?!q>&g59fy9!;sSdZrJq&#X(faiVXL$dzk6pRc=4ro`nW` zWzPY8M;KV}g1qAL+E*1NGSL@PqU6T99&rU$!B8v9wZ4_7(^)7NX=M4iICA3-v2Xwy z2-mrA;(#577gP~Ti3u|w11&Yu?|$7z-D8)h{1i;d?HIVV+{=yW$Eff2Sg^lY16bH$ z`_P9IX!altk;XJ43h2Mq1meYK5n{Rywmzc&8W|wtm{O_|J9Sw#x2L8a?(Q4nF`x#V z2ISFISklm&+DeV+Es1Ks>YJjL8h8c5k;^X+pp_VLt)cqXq8$bbfPDpD=lgciC(;{k zM6J5&;1wq8kgm0!jQ&_n1#c(ox3^r70cFUTQa=-{wCX0weS61(Z%e1S2e%PH_A_U!X zhC)H$^ar>JzmaV);(+DNo2i4*o0>m2_Zky%ySyFuce!A*H25FgPTRb< zo^KloAN4%%FH2{dMyfbBpLfe2vp?=Gm=wIJUPnOlQRPcppR+=rPK8}JO_v>Z=Q~Zd zRTCe(yS*8oX9}Ah`^uN?_d@`x&(j{4t=Cx%U211!a@XSPew)x!|M{`zO=xMSllk5r zrkY+!PVcGAQFccYi8biuegaJ^~3!xvm{3aOA zCquo9v@<7IPfk5Ip6xGCeyf>!lDL@SLgPEwyGX)lXJ%&ms9oB-XYhTJxRUze57jK& zOf94|oPMKhHzfQE8<11wWyII&@NR0F#lxLV;_*Jv#mF!*Nr+t?S^vbp_c|V&pB_Bn 
zTgi%{#TDnjQL)5vW)4Umv!q!L_=}DGLTYrt zcQ#d+Bo$OwPXQ>JB0Rn(T-*P)Ff3zc=>!};`_id)=0Rb-Z?P@;V>Hzze6_6C$10`V zHUElo(7z!}LqvP(e10iXpPN+GoP`=X`Iaq57iXSC1LP{@UquM(I`hP1)-;*y`W}{! z0*(3%L1{I!39I>?&X`~s`32b~MgsFS+QQGva|Y<-sxK-1vNJBrzW?iyf?*8v5{Cr? zJ4Q$rU;rlsQ~@hOHDch%by6oo`*zoH*4Q###zt<=Ei#i^&af)^JC*9N*~ z)7Lm{uZMr%waRzMzcdiEHDSy>^||C_dEM;al=zg62YHJ`Va4F&L`0>#OQvDoAY~yf z0W!_bDIX14o|Cgj@2$&6@6PN;@5{-p6HMw5bcA(4uITH(_Ja#LIjO&C^8`z6YrH)Y zo$|Hp7;=h@bsSe_Qfs*h4h}L?yeBOaV@i_Z>>rpetMo5^lk{v_diClxP9cvO;X8WM z3`@s(mSAIb)_h+Bt^ZxWqYrBAIf463>8LznHDjj~yim8*a8MyIs>8Ucc z9ztXRGCq6ImJ6K6N|VQIUKay*jYnIX=8d_NbT#jS`gWCz5~oh&H;?eDo2^cGjHDLn zPbLQ(=yFPJ|y)m$V|K6{HEkHZ62rn^cx~Mj3(rZnz)rn73FGQ$) z(+citRLs@a)@GJ@CZ0b!K_SkWpGZ{N1Z#^0P^}1|1UIZdMLRioPf zpc91<^@$VJ#-b0LTMZRe;*P8q4h?qqqnSbQK~i(En3~YzXx6}xm_v{kNbzO0}26J+(*66nQaTGqxT#HJ;Dr*ALXw2?@|iEogjrq&ina7S}?zr`P{CA^@k$50k69wt>P+3UW|9y1!n`i6BNt-J~$fL7+qtoVz1VOgb}Kt{z#lQsA-jwKFJ?FfSS z+JaGW;@b$3Lp)C{t7ay}%&*Z&xvFUa;7MO8b~*xqy0M8ONUeZT=o!r35UnC}JFzKD znmhw+rHCSk2KSo^HD!?D_GTXDgdgEk#E;x>$g>C4da-$6crQW_#O&B;$8UkY02O3F zrv4`UVWq0XjIpi^U2U+KzeAL^f55|z3UBz1RG&k5vOV8VDu{0i(i;u&M2W2nt7RG* zh&?Nr0o)VC7W|z^_A@f~+!YAMzxphYGltSemAxW&mwrl!`s#CK{CPr~UV*G92`(?z zNo){C&C!LIX}kK>+I7d%N_cgd2e=BaBhT2;4G16~ocZ0{vnN#*``{nfgbLhOa1TIE zIi)5_g5zNiZmNpcyQH#qL#*rKBU-{{IZ?^Fzi;2~4PtQkWmAO^#$kIL{{0f;KO*ew_^l&9 z3H4uPZ4oC1S0{D)Tc(rx_xU4B=?|c3rymEhLr_-??#@=HfhmUO6{=_~8aBgMQc$DB zYy2HxigUr9%T@bToLI`<4h)r+2A9Mte)H$R7YoJ{mLeZ0!Ra3gTb>MqErmXgoLPgG zlt40MKjJAqma;5TFWS=+F-Ak`t-DMmEr1UcYq-S2NbQ6yKwkKSI%%rPTw&?Ud0>pO zDVl3Qtw{+Haj8)!s7qhLj?u;Wgd5hy5#s@wvZ$c4We)O>I)yvt03y-sc6c_`${We8 zKi4kh8bfGqqggTq^T`6nXa-@g%dwbGwp6+5wy`Rjo*6 zJ-u~JRwN5PS{;BuQn{umfa`I4eGRbpk{pQGDORwmyZ{dt9bIUZ49nKQ4Zq2tD$Q*t|$t?C-~i zX*mj%xDTz^JIzmK8a_rW8ip=V@SeD%L_pHv+|xhbz-7D2G<**u+eG(HmSH+|tBkWb=4%J6as7m-((r=GjqQ$GVvqKW+E-EUU&m zJZSi7YIlZ99ejTA{LmzzEm9@bhd0|aZdwpP+_SRG3L^hm z@3W?jz9hAXdxQokRuIB7a2Ssv6P|Q5nu#tc9hhMvZBycK0aP*o+JokYwIr(*QqtgE zVAfn;D-JPZ7|Pkq%#}LlfYcnzMpmelooQ*q+-iZutfag!-8Y6(M05sA`O#UUP zBK(!Oh}cQ0-4*RdsLW{duy)-zg@L;sw;$1o8iy4Dd_N6nVx!F91#zMet-ce3C8y8l ze8Xf)o_)OG25S0>tOR4}@Y!cam_6+LuicQAIouamQ|V#Z9j*-7z?Y@&?XL*->cG}G zOTN^$Le}@b2x>DFiz|np!@;ONXT&a@cgrX#9Q=s%vfxd^hE6bH^H5=)HUFbcY=>1j z-ZN6~>~`9%%$-FbyVj$?nI)eHF`kjd{#$}1dQUZn8F;~i6yb8v$XIqXzpY^R(^vZn zPR*GSoFzv+=tM1tUVTDhbHJX^e#@rm;ubjqmv25AzXC5IQ)Tt1CxD(_N7&sEK7MHJ|Li&A3c_0|& zniEul2pkge_D?XsuNvqNE*pAK2~o*r5Aq1WUq(_JDo%uAOCb|)l24K2PDe3!K%`*S zh_e)I|FlWjD#tI~b;Gf~osq?Rh7?omSpHE?AyzdS@F)wzDjXd&hhHHXDhw^o%VL8K zb)zDEa>)mb1SA|_Q>%YvQ88~gwe-z5zz04e=-*!;M}aN1;= z3MF|giZ=M8y;NHat|ON#~*Y_@~8 z0O26D{uRQfU;WgWO+U0V<_?_ryxY}O)Gu|~g@!pEaY3NuV*dA2jrBwaH#^H6I9aL2I{^D?a%xq&$b|AmJ(K7_k`UvoG-|!k40T*}`te+#ypzFLJ zA%6@M5?PSKx?jMjqMKuqu|}xGxkY$Jfoi6;!o4wIFsMXI9j_XPF$TUeE_Ovm->|b+ zm+q0a8^V742sHXl7bDY7epj7JD=wJ<-pOHCDNGCISMk_IhcI{Q71=h_B{UNq*Z|5e zL+E0uihZI#t*J57Gk=u&WJj4)1~s25hOWzvK8a{+A0vFCpgl^+xHK!>c|)zJjk65L8`RZOIQ_k7{_fu0?5GU!2YJ>Co1yr2NAqi^z4nqp4>Pge+J3FI)?kS zVuhlMJDaWno{@S8n{F3EwFD+zkWB4c2)iP=Vi0YYI%JXsLTZJ`SId}@0nA+tRdxDr z!&LfwQOo+D7;u=1Ix(WcDE*FKj2Za}KoQJ;2GN!Ahaq$q*3Lb_Uq zHPKlN*9h(-l+|)Mq}V5hHDJ5aAFqF{^0MGs-cuj6FZj6 z97G`7kByc_7U+H86lL^u0oHXN{5ag7n_QZU-@_V-FMJQhkC9e!Z zyn7jV=x+607JT)k*gV007e~4awXU!;we$^Zgej;{vx=^a^oOvw!cpV%=;x@Y)+FGb zzCaEnN*7!CsmMG%h7CY|R#Z!Z$YFDc(pN)I`@U|RA()8y6TxvpGh34d{1z_ORB011 zq&CLN^6NgEP0gYe&i=RU@!b1Bk4c(^C5ANoKG&@YIl7P33>kH9SZ5T{JXxc>4kj#8 z(?ah!KfgM4pV(iWYLb+37d>y-kt32 zPMW-X_J|ZJT?HMXuTk80l;N)$T;@0E4Uxm%VkR&3&PM6W+>?@Lc9j}IF1kPen)|v6!OiLzh~nUmxIBo8w3PN zK9C>Cxh;tfQFd4x*G?HvKOP}Hj7)aoZ(h6zxu^eW_bYCb-L#ouM=b+YO^ef1pJ=Bz zO%(K5=W^UmoYpl-5Jkbo2wULw-bG*FFO 
z*XGN#1~BzI_=Kl&Sn8@dAD66jDSe;#gr!9`1PIm<#ce0Vmg)YUs}&%i1Q( zIQq*`wW7c@!WtOnc+aW`$`Xqg;&0PQ_p8vjXGHSAALhw09^>g>aS-b1w9RO##Iz8f z#Ys1xn9sXmR>Q0>Y|I>awf6|esG56OR!-jxaeYCwP%F@|s=rsov2#hN>djqpW99UX z@ep#_khX`X!=axz3R|z5zRsHWRwhj##&I$Af`b6PL%#t2j;kbn=s>8Tq#ag%oCMx= zR~sq}A6JM(-L7W*>U%81vv19nK?8TYt4I#Q$IyWSZi@`Qc2JD*e+%Nyk^*5=k0s!> zNxkui(P%{}>FxrU!PlC0L{vz~N3PiRn-{91q!B0}@6L6}Ts=cW5Vc(F6A_wMP_*KO z_oD!gT>HS--iwiG+DV$E%I*IjGNhf2!uz^6&cLs-`!z^;9Kri>UejZ8D5bbvwqNyLA8@j{;eUN3olcUqU4eU-U0LrMke32 zZHRx1?9ne~lbL5LJV$-3o)knQl0+uoniycT+%lN*y5yXhAoqiDn;~ zO%)eC?s|qBb|t5x%-f0*UrGOb=CnvZ(0yvtR^ykt&G=mJtUB~`jT~jDY|7$m8L0bG zQ^UEe)A0M6$p!dy`*m-+W|8%m+a1`mB-U$R<#2u>lvC4{)82SFp#G>o3Ssg-X|V3) zt9se`++W{peSS-Q#4=EHeeD0;(5U4Sm3^|#;?Z!iGqj0y%1P|woz3+o^eV4|x2mb+ zb6aeE3HqGw*b7>`HrujIN#|RG$wX!Hxa_QpopM@6eb0R>=AjhiI2u(DBm_Q&aCP15 zSFhJyqv*ZOMxSQ@6g{CgF3hP2Vy){{t#N>#?ER+T`moZq(in6^qF(nZ(Em4xr z>!NO!$SE63;$31Igv4T@E`#9kxQZ2b(WdOy1|ar^!O?9s?_U*n-A?NJh zXq(OlwG<@y1Xt=_C5=8C<7PoS2#1B2#0i^jw-$T-ZIzLxUk@vvfb9<)+kyT+)c+t% zPxa>~VqFok7XZJf^TPeE=_cRP0Fd?ce)QoiF$6Sav&}9|UAiCKAAOpGmcs<}I^I4= zQ=e9(hF_^^CnKpyTIyfvvV~qBEgQYOu7@(uvc1)w3_b(e-gne=*IT?UKO%*cV26O8 zY0t+SwZm1}y6acKe$r{V-s>|6@=Pzy_32H;>(P-T|DVO`IkACGkDDoi(fz7FWa`ku zwClY*tPFW8W2w{KxH|0fdFGNsx3c{)+VL~D>pbkxGI^~t{`qUSpB_HJkO{8Cp& z$3s|P_}yS`V2QNL zlu=W#kf9~R&+{ufHlH?@QFD#esMq)J1~Ovg6S1tsg46Zz#_@s#S&8YY^=mKsF=&0IW@35NYi%~>xS^f$}6E}NO?wFuxKbn zOV1^QNBz$Lo|GkEs;1Rk<*00zP-=F^$?oiT-9SmZ>L zrQ9A_{yrnU7aA7kLq+`CsCU+|wp(%$W8^Sq{uBDY zPu&;&iPf3^s4k%7b;*CDTRAI239PJ@nvN))xO)W*_|jRNd>nH0s?7Q?5+Zw4m1kd_ z#o|;MlJ7lSt*SbNE-IQ&W=A7aLJB{DZV3_+{?N!0t*Agj|P_G>UG}7-8br zOkt{o{sxPI{dLNE+QVMORzVxcO7oBPWE;l7`U;Uy%FHHmWEu3-q=EccjHE!CPhTZ{b0ia7q6{z9+2K7U$Mb^FY06EYwsJcd2Zm=P|V#+o%uLU-{5&PxbTp;p1P`mIC%TuDExgBtCQ0M+Q zy7g6_ZaSy=5CLfStt8Ss zq~ZH{f{WpF&Jm0Xwa%5thyM`HAs@jPNuN|Jbl$P^mHz8n|NHgrCs#}I`A34Ifl03U0gex-=$H}xgTd9ga=ylA?wedvnQKyt zQTscufQ)bFE|Nwkrc99E3x!?|U0e=rSXWk3olQ6!m2#DSh}Gnvc82EBQ#?`p+7b<- z4S%A*4reO-iKXI&VW8!O*5S%(ZV4-RxPF^?yWu(6k#Vp`T?}@tj6-`Xbp+W2?HdCL z79j(W{5{j1xSW?8yPdO+ZWLI`>D=m)bAlLqz$|Ak!LLgDj-$F~pK3j8ml@oUwp(#d z&`R9IAasR^yp-QsgX5TPZWKu%C)s*#+*ezg-&E3kp*fi&<-N3-!|~}g)|gQ=yn-de zX6d!Eb;0Tci_DWhDx^15TF{3>upO4=Ot7<{i#x*>yv zt><&x_xwvT)@p`rD$!M z6TqR-X{f=-?9wv&sfsZ0H zRCp%fIQ6(W!iFnTv33*zXr&P2R2bodH)tuWT5r_`VpS35nDAXpx0OoOz>DJ^?DB8t%IF@AluqwrfT zB%yyKQ9j?XDJ@R&JnewSmSrj#HLL`v|gnmacZu1Lr;ZQ+E4AJO!^+a@VY!(0kvA@;iXSY#aq?vhi=~Sa__Zlc z(+&&$OB~KUAfA#f4v7V7TV*@*5Vv#I;uc6JALzy-leEo^?g|)Aj(EZo3l<3n^&FqW zA!)$81$JPv5j{UL^AB*)9rb!=N8W`{2x6|h$w)nuC-F_5&?FCyC~gX45n?iVd0d5J zU!^SJ3-UZ}^4a=)^_F8zwA^(Av^#{E4&}3^h2O41FuT6BR7q|Ly*yO4Np+5GyxjCG zoGLkAE(Xoq$SCN!Kki-NahTx!IhVd@i1^Sh_y&sO>l2y>_`CeV{*du($?T`}A`>@tEZ4MoDo1NJ91(ekC}Y zdmqD5Q?PzRuMQ)JSq&-h+rgKCh_HT_LLPRXgRsC8`*6F1v%}f!%gb)~oc{VxJf@qc zz%>;5rR$dbDJUgwjAdr42B%AY)MH0Ja6&gZvf0)HyJ&q5kzNO+HX|j>0^X?agh^r( z9K+mmP$xlbbW%wc#cEN2YMOp^iJY8yp<$_NMw$RKUk7G_m;JSh^}+JZ_19y)1_%kd z3nu*gT@SY+!RF82%V-_A6(beD^5D53sXxZ@4iA&{|k+GsM<6fN8eg-+q(fmuR17UA zG023Q7?Y)MOs7x<+9)B&r3+{ga3=&#$IT*p%BM+*WyHiyaOTFa+hHh)CVfrMvV!~8 zwqez@av3!!Sx^sDOg*!^Uc6sCzIPsV#PKavSn8+5$pIvKucs~yEThcY9DU5AcL^8> zMG$GMX%WyIUw$JOW4ZALj8F;jq$cmcQ}05~h?JL^`Sqw%U_~4dv|o#hk3>uSn&4>> zn^07$(f68GQ8Nwy>HvdLI+J7aBVym!C57?_jQ}?TC1(cd7U|WKT+&otA$oca}H? 
zSWTI;9ReE{TI4qzW(m2WdUMFqely=PwrJW?kpbQuvz=TpGgSsniaN06z8@k#6ftX9 zbTnZKY;3_&L}%QX8zw3&!V==*mB3_16O`z`LC*sr*goukEqbnED1uK+g%$fs&6E&- zVwDZJ^(ohdsZ#~pZi|f6C-ME3Wml5*X8mF^x&=p%bzdm0Lfj%~`=f-Xx}bl^jh2yw z=vVWOIJ?b)yE8JUcgl2d!kKx_pmV06%|FpXdNpL0w!t{}`9PlVp z%>rCNW`t~TcSReg(B5C)9c_|Ys+{0}Ya#}gy#8M{SQ()S{b0D?`%6)WuwpnO9Zr;x zS7M6t7a2-vLKpMD3mMMDaFoF)jq3B_$ZCkF^A-ji)vIWtC;a`v{r8*lS2A^$kPt~s zbjA#6n=xcmkw#i@H#Pcd@kmV8RLLOI*+@ic0J4YnSP0j`?sll0x!ABLHqaR(*t_RklRC+EW6?IE0=~aB@KFbyP${P{0^_HvJDLnfxUb0F6XkF zEa3pJVT`Xn%0@4K=YUb^yazE4LFJA{33$ZlQ&Uj5#D=2yYj-o_CNNS!Q;4`2GJv7? zd&6a2RJHgf@^&J{-d^ctQ53*ib#@uH515|Gy zzuF2@L$(f3;FzFUUaw&$ugt}xaZ};gL38e27BdU?N*Er&q{>2(+2I&))ts2j`C={8 z-}pQr^kex`5(Qv0&m^+oGHV24LZEv|b0OPNBfIv|(t|f$V8lF_W~@>35L4KbpI$|P z@aHeP;Z2^KzaMTG`Yq`{Ucypy10|TDf5W&zP|j${(y{_zOY6~Xts1)Oe(Ih2_+D6P z2!A;+#{Nt+!!=NP#uKiA7=+`7CY{_sqEi%BXCv{i-%&X-otCBJ`zSJk6AFJiWiR(6 zWya~pGMQ)_!+1g^!OAB9MKJ4mHov zhv}er3g;hK(N@8j4&0IQ$1LaBVAZDQbD;+jySGq@uzfe&fu z`fK2hs~*VI?}F-^-}&bQfbO-NNw484+}UJ`6l{q<{kY0cPDrFNP%nWceYkhKILOLW zJ@nh?G1|xkn%m~t{pLCu;HT4mr@yB7oMQELS?m_Oqg1aPtl6@x#T#JqG>wvP;QYE< z$*MiL-@Rg`)2yI)z=k4t+;DcGQcn_MwhmmeFk<*DYk77wdvEG=fMyu@xP46LPpo{# z((HpH-_|cVHkA=aIRkK0esK=Fyp~_GhHjjn{S)j{HigZ)w#}-#C?s^PpqP@i8zSj&&>{XnWc%O)i}dulxsR$3#3erz`bmRD_m#r#rZA=nWn?zanpgaqySW z+VqQfjm|H6ak(QHizp|0hl9i=*gH}9;=O-#%JrZg(bF?W!Ra)q zmjt9OX5EuoYKAr~hDc}&KOxrCZ~y2!2#z>D~QSk}!2jxECxteSfX(1GZ;L_Cvl zWp$yyzTSjtcLR#~_j7Q@s-l_`eVLp>Lis3FzVZGCe7V-WqvO2o7;^IlfXT#^M%&4^ zT~>UZtgfJ}d}}xSu~%Ddnp{NsE+k$%zW{`Iw~@$X$h%)F(`vto^%y<8t_$mJeNmJ8 zDAK0S%%EkH$-SH7R_nvl>IlX)5TWN6A|PX$4HhD~LIABkH@5p%0{qA7Hv)we{Kvqc z4%SWnPPYt34Me*pXJXy4n+nnn4;oTQ0L_ivTbXC zebn^4{tHC9LDu`OX>=}yaa~TV`9wgh)5QgF`ALZ*qp7xCzoh4#Ws8S+rA1Z8QLIni zoP8pQs&{hOKC+}y=gdg*5*5Ism@uJxh7pj%a@4xQHuuZBOgCzC^zW<*-^j8!isZqv z%p8YPjxOwwnb~vvYn^cON;6@(p@to6y41Z{_W?xr*)VXf$H!FRC^(!y^F^L@FN$8A z4>F{goV~FBUTluDT&D4A?c6P*Q5$6w+P0?}ZO_?kEC*leKS$o$k*1mTktcP%e-=MT zdZ&NX3MO?0;Do~nf&sp2-K5)z!v#xRCn5&Vo#~!P+?;zp=6ok7Ux;-PCNFZDxDs!J zPJo4|s}HIYlLj(A)=niOBes^G@u%iec^!H(o&H7w@8M3quvR~0NMTNbpj zBp(Bz$-m9gd(cbT(HOdz6Sjz%G0B5UWVS6ebaUCJ!(mI<^p$o^Yu?CYAA0tGvPDrK z8Lq_+<5QD$m|oWvR|czaEsW)k^WZq|hybg*AtK^n;t}k7c%zuY5U|t?B8Xe;MG-<5~vSPkQrJED`F1bDc}wF>-OSrSre8{TPhtdyZA8CkHyR zExYmkbTeQ`+=v6zpto(z2iK6AwZJb_ed&~&Go%9fLVR|73FCCS?BL$FY2!Cc>(;!7 z8GBjaks1b<2z;BN_pK*{*L!c)wB3-xLJd6XRhZr(8ok)z6&|9QGZ6d1Q~hLx`KQ^9 zte{6Z2InF#ZOMYZL19GJAeRh3=>lAk$2bAmqZQ19iNb7t>H&JA1ZuxWN+;8r2#vyv zIW2vXdW5JEJ&C8S#Bc|gx3+UV59r&{ka!S%aR~M#geL}5E}RM9oH`j0KWFKYvglKZ z2itLnlMk{F2Hy~J5(nKleyT0HMY0s#sQo^%o}6tHwER#F_~gt%9^_we_5_luIK8c9 z-8oGMVqX}f?Oz&aiS#L8E{O*_!Hvf8ELVK~vJA36aF4;_7-bku8^6iL&!oD#K z7(RWoJ~luzZ6hTyFv_;jH&{LbxtHe=fH=-Wu&mc!-n|99E&E-xwk}Q-bZ@$x-H!EK zlTac*kDopHlFy&qyrw`hdjKL#)_eN;)%JF8QE}!|_g#6HnWe{L@4|V5UY7e#Q1qsn zo>yPngIn&US~BP2mQTgYecF^3=~H$6TP_=HJ+|7W=XO5VAjz1w^>voi`TM2!YcHB3 z7jWR6t>V&hLwCC|6XwJ9K24Y{4){EkeQCd)){>x0 zL*>u2LQ;LRahv2YIou3J9JOuD;_>uRANb6=O|Q%5Na(oTQO?$O_$O@`>$Aln{+wK_ zoNH_eH`RNe(pz(Gt>y43yE1bha6IZXYp8hMO?P~^ts?m7sUp?>k)$H&(ZzAQq`)L_ zHQ&+#(6`+D8IniN1|GY-rs~^VGq~qnw^J`I+cr(2R8g_mwduSX?l0ARcvTa!isjng>$y_TI-J-C|I^O%&&)Ww5rua!65OC4e-$jQ6!EaxX{>b&VhXIhwhcrrOVGgT%Jezx6$h@SMRwx zd%LAp;yNHJK`K~~*Jt0n{%p<@h0$`|^|B|6$t5E@6ej91y3=W?K{)$)0drsvw(8w# zapBPfo7jE#vEXuMF;4ANbaDOm^6jKS^IV~U(tuj``MMz~DpISxS758-_BqkfirW6Y z^E2(ecTNEmW$Q+?_4pF3hWhKB4A^7i+XS^<&H=$gnp8GxG6$YzL!YdYCHA zhFLvT-4Ck1>}2WVXWJ8I{PcNf`iz&m?;bUSL%y9L3UEodPLWk3CgViN3q|N@lFw+P zUw*ARM>Q8g^rc2D9AwWsj%6tAZc@Tn&U!V;-}Ei$>3hb4S$NR_oZ&|_mN5cwwWVh0 zRnRHH6rWKjgPC;1{(F0$7@Z<@9@8-gPdfIGHnW#ct5JbL+=+eJbzgHv#}vGzD2qg~ z?Gt1-LHg*xhDHv(M6Ii6_-1 
zf`lWwxL)1)F?ti}50~GDZ8hzX6o`IE$WFsxX>{?M^u_JS(UY>vMw6GxRnj;1H!stz z=+$E5}B$LE^o#9=kXZ~FPXxfVZ)p8sWu z@!*?GX$s_y*#ibRS`SD7vR3zW#F@g)Tx1`RWz%3vosIUVW=LJc(8I2jVC$z>d@*2? zNP_XnNRaL`Til{V<$#GPH*?qqPNcGUs)5#b-!dXC&FA3S%`Wu0JA2K@(1+|Rd0qp{ zDZg(sKUH&mUsy-Y-J9riw);37je1@$xf~yRHM0}Cu`T`RWq8Od2hF4qPwxy8bj|4M z6TCW7+=O@>!#fi$j!(R$;G`*urLU70Y$#IQw5rT9t<*GEjhPsTk0l;PQ9&DUQv@rY z^-C3}O~&npbB$337)NBQY#tKkWnuDH<__qUqdWK|m!`$_8aay$Y=j!K(rpGF_yAHB zCzwpRrh*O!Ul%B3fTpH=50sG`j4ae;BP`I?C{hVukD5^{zmW-8aokdQX`7;gt%jkl z7JoqEEo~!zD&K=EbiX%c=Cr>@8LZJ?MGaUPh;pbit=x^z%@3<2l$j3A^U#4kz&T z%>{~1r9+42&>$j>{18Yn&cD4;XNx0tiY8I4DQ?!ZYZFK}X zezMVcyNyEt1Sv^o5l-X?z9|{VGn8x;I2=SY7;+OFnxfF0*MdWCNYs-;%!*cO%!zc! z-GhmTKBJIxL55wL6N}JKYSOnO=snfdbM1X#NBn`rGS)j!MQc9w5ZXSY;lE{EBADod>4fh-C>Ks@nRPI2iUC}h^xy?V7qf%8hBvCDLb@eG8hTBc8bh9nfuN6$xnC#Bd!}#l9_);t zPVloV2wVlezGx7}Q+T}vA0yiCyMp0sB^?8+iXiU_!xECcNP@nBMk2zTyL?a4B^;ft zRt+L`IL#4nMsNo9MLFqpY`1?Dy`c_G6A`Ss%c;0cTe+B*UAaN@i65H{CJ~IBiPLus zPNR9?3T^hPc{C^C_e6Dn9oMfC$S)WpM}LgqA>^F4*`qi7Av#Sot$$?O8oxjI%s0Xh zdj|!^I<4s~fqzHmDYCpC9x@q~eL3w)u{UTp(t)QE>1I)S*pOWlhChvXgtfD=0BVFS z5*tCtPMXW5zcdeC+mhd$np29BTFJV1&(y#w?R?zFd!h-E!whkEBdvE29xcBIS2z%g zN7Q@xo1F@RSXVvL4grk-V&WOwC^LF_3WwU8XlSu=)F{7Ii_UV%84XSsBM+e3OKA`; znh;RWk(cpB&xipO;7k{QC-!u1PHZUyl!|w%8Z2(en9}8jnNcP-h2}zKSC7^bue%~1 zG5u~Fqrs4&<>)#v2Bd)Ze%Z2ZA$~5}Umcsv`(i=|F^o2rZI`ERWw6M1a_hr1mjzi> z_OxoO%{11_kDb#KaZFPbRXz@;gHfPyn1wB8ZlM?`8G*N-Y2O&Gt8hqB{fM zsGk^nl?(?+fW-C#4R&2GpqfAuVpHNRrGROZ2g=@A2*b%x^!c5)rFjr`SH((rN>}~o zpFuBQ%tU!^4UMb7qepJ10A>|Iab?iXgw^>OZxh=o1+XmOa_!Zt;~ASd*N4o95-UW5 z_1ShUe4B6F`_4|PwE@m=Fw6+>`o(T3k#((BNiUA!&puj>ke-qVY8LqJ!*!TQeI_m`{os38k*k zBg;6^%$r){MD>v$=&M?9&o){5H1Dplzac`^Xjsj?J42(T@d!2`_n@iK06LP^P&D|z z^HSAfSkNnSomP1$^&B=>HvD#q%~$JpvDlxkr!IUM_GV0;`~K}E&L_msDqYSupJ`{FYS<8A8mXu zn0T&#ZojfV;2*p8dYrtj3)Pi9=5n-u2-9xB*2VL5qrZ)F7rL*E0?~i3UF5!-&Up8H z#+E<6Ved|FQ4t022q zIwj?+IsOu~Jir>R+jux3Y)m89bJhASFY4A{e@NF-`q+Y_>L^=j3H8|8-R`WbSGUD^ zl3jQ1Y>_ysTx<2Q3)JlLeuil>J%K;^9^H9L%9gjvaj*Z}<*{IIGosbl|AW@)OkaN8bJH~M&2dsLJ^h6i%aeV7W_S}slcJkR#1 zN9|a=`Ta%NqT!3iwq3t~D66vB^cF|`?J>HHWDcHBd+9V+yrA0@52^J@(#d6xo8E0x zxQxcl_NA!H^JJ~#49QadrDk={?91VvxuVS0Z--+gy>3G97wQ^K>~fZzZdTLV)YV+a zwvPj!wm?mfC%wr8EPl0?oQ5&$$>%E9p6-{UG<#ST2d>WxE|O%#gu6#0sJ^4tGgaM7*&CPP zv6I1B93%DN$D_Hb1jKPa><#2gaU5^_iHy}<$>XT7v_K~D_C}wItSH_UB*zNTze6|q z^vR9Dhd$jTO*_?n`a@jE60zYIX;=5JW$y@V&}ru9xzwDUhl%X98rfgEvtL+|7)u5* zH4<30(Weoayi1J*k}QznRCD(2zDs1g8J^Rn_c z6kbgi>n4r4O+^C|YRHpXuzW37&0vQFom%!uz z;S>L#oAv=#f)?434Y3U(FrO6=4-~ftoh71WTc0zpGfLmp9USEs=7XlYzK`qm zPdMPHd`r$L)L0tt#TIKeGp@{MF4H*OunJA@H)0LqI~-0r^^Zw&Bp@=xImd1APe^mz za=IVU6jLZk5;u~G5ay%aa4NxIuP#kwsF57$e|2WQ#ABtYNuMzww}VAUW!^gxtNMMM zgp_ol)FyX$7&AggDQ-YZQxn9zmPj^cB3&ZHjIvlrBA1ILHm5IJ`MOtyiL<1D5TMpE zjx27;73WqSEw1T1f|Dt|p`^>GfXwx?A==iO)s(6JAt_DtV4qvlO{EX zAK0`g+3ALb4A_n*kTv-t9Swkwk^8qdRDu1r}s_K_iB#ncrib~y~ey$M+ zOoohEnJh#g!z4_buIC%u4Tz`?`6(ThqIK0L445YnUbr@Y5@IKB9}ThUtgLR<8cM~; z|4eJBY%xa>E@40tu1|xlaq>f&DA^acPKEgc6D(U%0gKOx(WQu_)RZjFxda`wiDl3@ z>bp&9E7sbwIf{20)mZBwjR;}l^N_TyQJT_WY4hOK2pXE2jcSAkj7E;)VBxo~4x888 z&CQrofB||~{vzTO-5AnTeiJ^0(K6IE7L_Kly&ZkkO=cDtu{hLyG14mYqx#{8)ISM` za3s9!b&wIGwuc=_U}tin4Dd{4F;b>3p;9H)5y?7X2WqfXzdO!@LS#*xluA0R^nGG9 zOZxXyGK>K?_U~f464Ek!H%ysTUv41L`lU}WyV{Tx6A3HUkdp&FHv*Yx-lk>m<6$Xh zkyObJ;Zg)?J~&8xvIu$$9s0wZy(7Gr(mJmQKO%Qa)*Mz?3QDe+sbI60wJYYkYxKuq z3uovlz4oCkx`#GJ&Z znjbAMK&>eKLMJ77jcpdIbfRUK?6+;|-?V|9>(b$=NwgKuZf*2|c>~Wqvd5ZIi6U*B zh;ddE>_h-wAP7c8NqOr0D>zhn*8yt9%X_MN=cHP(`lF}=QHWn4T>qHcwfI4Xh0MaxQt@i~=>@AgrGeeRPZh96P)TIPgB-K2Sk zK8u9V7oK0;&j5roKYgZ`zu2`%HYj>g60YW%Y#+*mptwjGx$=JR%so%DLJ2MYRd{YC 
z3Jtu`U0T=r6-fT#)*>dwNLRs-?L#21CPi@{VIX#p$lV#v{3+Uc`Tpk*pQcM}&=un= zbVq2h=FWh@7SZOirK-j%XGPNJQe`-O-GyVA;a4zz3a<(a88s9aBf^;(@_V6T_Y5sO z^%9ice$8Ph@YO!rL}B3GG~5Y6-=ZUM?_BH7F0>&`k^@l~qbtpK4GNLI_N~#(+_M2r zOj<}MjDPp87gaT|dHK_?8|iJjuva)OF$U$MZ?_ddU0OmQIyWMgoVDtHqBb6BC3MksTAu^R5&Zx(D^1C$r1sz12X_bK$He?g-qH|IqXl&wQKsxpkKRth?{B= z3WAh*IU*x4MN1PGp%AG=QYYawPt?f3x4nRi5<|~shj#oev?jl(!_eWUhb&z)xZ~6c zcXR|q3`O#{3LRgm#X4hoP{i%pG!TQg`#O;lQ0PE8gsK}2J86C)DJa-7P$Ttir|)bOQGBxtu%D-`{-7@XCf|sf(}i9j6quM|>CB^Z|ALT}RE#n5y0( zY*Yd*5@pHEl!7T(_MI{rjStd`=u-G=RRaWq*FJptIEeC_hZw5N-zJOQYjU0G>NimA zoPgON(mM$=Tlq1hlC1!?6s>@4-=y25Q17qs{x$C?>j7%$!Op|y78fR{JM%D0j`7Xr zI~X>4Th@lQ-sWG3xM|+8XwWI0^`O6{Mc73_@cMCJE5993UImrasEjzA;5@j1m4Xbg zW*-l1FRK}kG`G^H>0vEGLqrZVmJN<)wj;a%y@fpn12r)EzmOLcXx6lV-G0ot_IGZQ z|7ap=D&|nJ;k;5ffVRS~mWCbj2*+|F^=9bdz#JMW=RQCY_yKpA!9xLu=gNtGwd=G+ z&H#T1a3!2kg8EpcJ(lKgRP6g0u6^7X7XomFA%mx6CI5Tj8?1xd=&)AUM-$gPC(gq0c=c>O%{qjLAuo z9oU7*I7?$C6BxVoG3e`*V~;)?ev12zeG=~hZTrFL`1;sK|5;{wQWJb({EB8+P!%0L zw25uNQe$rzL(5jB-e4Go&W5Ecy55bZ7Tz)9DVfe%OK|MR!tm?B&+m2nrm2cY6NHsL zgDbX)Z^T4c>lK47P);Kruv!rnq$Dd-ggmg@;&lTH^XRb^hJ+rNyf~M`3pZa1j$0 zQhT)A7$7Gosdeh_+Xif)+LF}9vA?x)Sf)i8U#LaX$Een~bW6a7I2T(5fI12NM&RdF zVD>D%F?#B`5D~tnw#*K zL3$dLdn)=>t#5hOT?DIiSz1aPV;pvLs z<-E5I4wnTwCk6Gw_r#R|K%H8$?l^95%&ri=n6%k3)B^?3;NQF=!O8Qs!>CL0`wHp_N33hY?&Q0! z|C+gFgAdKLBr}ubPZQxeTB9BSg({~R22tOi2 z4yTwX_JXILYDvH$&N~gp2<|3dFU{waW&GZp9(mF{X-n@7`uf}O%LzqbK4)PYw}?h0 z)76$0fEHaLkcaRqDRFE#e2Ea74zj9~-fJDxB%m@-zAB*-Bc}l*%+LazA9Q$-ut!!H z377+1r2B?n^9N%f+Y7XT4YVsTjf0UJZWdvyUyYOXlt-%$T zECw2SrvQJPdaV*D?=m(?;ucQm?e1m4ozl+;J6uDpzP;g}rdj9{kn$(Ep`<|C#~T73 zPZx$8OAa3GV;+HEAy#Fr2&7__T2T8tgzvl|dsOqd|{a$Yvd%hnH%iYGse7=@oM zK(!}E;6u)-4hJq8H`-9G^8(yM=)0G7@Jc+5FY3S%=+TUKSTq-&#$MU!Z+xc**8-U) zjjwVM$z1aXH?%k+$vW=@aa7Bcu zCFGX18O32k5{92p?I;@DvhO?=Z1q~hU_j_Gf(mknO(=44D^c|QMl{ZRaPQNHV=5-{ zZ5ynKc~E`Z;%0!PUz-O~!v6F;@^N&QjfrY?sY>QWp` zCIw?&QTTliDIv1q4P4>V#<(F>lWxD%q5+Ui&L?x%TZ+~#ylmI*kwr9Qm(y!R_O*8F}8 z8otNxE06TN(U_WCeteXx`0)YccFUjgyctuWz2J0q+A{0&?M1lceKF{%Os-(Gg_q-G zSQtF|(e*y|$m4jsr|r=F{+4+tqyOV+#m&e8v>HgvllF=A`or}oqI-rNsjI2mzNH)* z$m2<4@5N}di&~Wm!j#AHzT@6n`@<1BsO?mJe*XPv6u|(^qyswN!O{ytNB)!a<2I6% z+l}w+#|`(OBZerQb8ao!C+EfY9<9BTbhT+k3nzUGAkVW-hXsD_{HMzcswVy>^{V&h z+_pdOE6Uy6o~kbvGCIVs79OH>=kNFEGf&2)ANNm5@pXJ<7qV$i4{n2L)A6fvL3mrO za=aLYUvbw5kWiZ;T|Bkz7?dzzAhs5CmRbU zAY9j|iXHzE@D};dHnXZmj!O}T*>LMnLMOMLj{mmF1thh(ypZB*JRdj4CMzRayQ%*o z&(g{bZ#Ufy_*`X6nZqO+qsuo8GN1=Dpiv3t=~OEc8m1W3oJx~1I+~<>GC3QtV$92X zt;U!+@rudy9qT~5Caq{g7uCG8RpfG`H3OH-R4J;X3;f;mRo}Gp_0=UDesKLfW!I@S zcijS$%;|3UOZcDV$8wPs>6O-wbR**{oBBnDKxZ^!VG7$LCMRCck`^1gL|giKj{1&bd$Q?FKqH zK>}Fh@Y@((FdT+7Gr81bY`qi|0(;(EPU)QUrVtllN2Aorad;86!uJJJjgs>$GUggn zRw?-wnreq1nu@VN(u>8a7Bn)c#j4uVMRUdU7F^a7ULMPfz`DYbx4doFt;hF|9-!9a zTOL*yTkjlA6N8YQyh4pP3OJK;3O^_dK$e6ma=Mc>Ji@JeC`wZ@a>g+(jT}{$ z#nn_C0%-bnONw>q`#CK1{RGS4$|7qH&LZ7-> zh2wgTtF2T;huUo@^#vg#IVD52q8LQH+xdxb)WL}r8Ye4O^KvV!*xbT+OmnFz-x^?4 z{zi?VSX7UGQ>9C(@CUS!1S_XI<&Qg|0HgAb?2V~c!XuXEWrYdVmAg(Ruk4pE67IrN zlrA>|dEL>maAByel`Y&*Rd!&EkPn7vi`RFjiPAAgQ3-- z4n`jhZOXh+=lgLlJJR`%{I;o`cSKgueAR+k^;jwQMD%ynAD2)wn}j6IsgZzTpC(Pq zdj~!A@lUNj$`WD$k~YM1bTOEqo|h{xeW*`^QsM9mZFi1f;VWx}X`cDN3>8zX9rPDc zAxBW6^~&Si)4qire6s4Ohx!tM$McRC>m6w{Ot?oqyelkirC+a0G>&jQM?F<9LFlzU zbu6<4!Ga)T8s;~8!xv|?SYqK|MStSXcxoC@ya?i~U;o)AbzF4J%&@uvh=Aq|vECD! 
z-TN9NL1n}swZnJjTI1D!i#GfLK%~?nQbSQ_)jV0TK zd{6rkGf}*}aVIR@_2eANKyu?zs{|G?B6o1IlEJu3i94q6$?x*cgS|#3J1^xg=HX$G zN^5zaUw4$-O+ zydU4SU$??Wbj`r8YdL@40+?&Gqlm;=J9m702O|g5jP1P*-TYXn^#-I4mCO|)hs5HJ zP=BXqQ!`|coJ+_A+*#N0vHsRo`w}5b2*!8pmA4&&|D3>0u1M9PjIX~aAv^YRGm`u3 z&KGbcQT1hfluUXx#!LD$6r|j}WIp1RAAsfr(OjJ#G@+E71glYl>kN=p7sE=7(1F+& z8x%eqJJ9=glpgqnJ3=5V<&AQ@ehSGOOi6#eeg=_3DUbsK4P$q#HEdl6J3}k7!C@A!aOm|tO_9HB5|$@3&;FFWCT8Pr7xZYjV)R1W|<6mnzT zKJT5rfntOhIPRzeCUKiU0{MM8#>-kilMmh;MdNQ@ zlZwiyhvZ}}(<)%H$Cdj-45c|elfMx7h!Z42B4D(P?*3v{?7BAm5bo;BJ5h2Q1gvUm zBU84oF6!_!@=aeTy!>%&FfM4ae1-JAP*~q6;sEJ)f*yqUH@e2VZtRF1g`VO^GxKMa z+}X)tR5NQqE?Tke(f0?69C&MfF#O>Sj5{t?G0xwCjbbUXsFuZV>6UVV&g;N3Br>hOLfLmz9M`L z;9rpYk>eepz)=w-oDvam9t)5LyIqUY`-on>Y?vOn!6y*qLP^KIsgaa_3T=OXahM{M}+ zfgG%YIh(siQ#@oF4Gxow6Dl}3{6XJW6IWf!nj&0`l`F?Apm0<|=SY%|Va~m?=9}9ay0@{+f4axK{ zzPjZnA$HF{Hbrl_>ATh;%;n3sS$J-b1BGnR)h%h>>=L)Uw{?nW$@DyYFNf;hsQYv} z;R52l+bCH$a6W`D>RSdCn4T=6%P#3QUjFPAuDV^bT5fB-od}s^Z)>i-)Y}`;vb|n@ zE`i9whU96k?0CUr)wT!Z@wn9ipCPYg%lkMWE28e@^;m~pA=eDCH`{g^Hk@<<>d&0Z zB%*6RZMnH-m*O6Kz6VciSRVzw&$!=4hRlVw9V&Dv0+^Hru-f>X_r^#|U`=^lK9tgP z+e>sNIqk%a5@xS+8zedY%zcH0Cf6S$D2pmelEJqfeiH45%5_0T091=W;pQPNU#R0g z^Dn^dMWk8mu2>&!ob8~@gjVX2GI_rFCPqr40tI}@L?jUf2a*ktgLO%s z>G@2{%gfDLC&o&TFD*ex8fL`&r6f^iKgGsd!4h(@_Olg-h*`MCYje6w!otVp5>$%< zH*r!mIW4%k;~$m~cW}NyU#b#X*O%!(e%2V|Nj~8d3>5ZC8h77en{I1dsxxEMuE0fYEYJC#D=~C9`jUxN+`A%6ni3& zYwbHa74`2JJOgYjR}!>Lgt1vOSn$qGA|ixT#vvZZ(}(Gx^^tc{ySA7i6e>)}2{l!N zUMtL?ElesdD>lDO#n<|slf0pAVyRPDuo=%^GJ6J&B}UrU8=(ES>E+7-Lj~hCD;Ky( z8O8dp>8boYvfBk;<0wZz7Z$D@e>r&}9cb{L_3aKLxnCw9Pp43rx#WRWS{4*7mh;6DMF$HmPCd2*a!P;piO_Ox{rhyrgA8a7~4!V<%bvhQoXWs zOlyPIl5oJ$4_M5OeFd*T7FOG`+N;g<5HIv`5&vGAUx2;Uh4k}D9SH9u=-TF(7vz;I z-_p3bG-x38nWlBN_p+VMW&hIk8F%JGuY^UW#{L>^Seiy9);F84xA^8L*kAf{_!hv{ z4-dDJ#{*0E#+&P^$5SS_7YnP-bn^jWy(kfBElpzoc4A!@9t9i+!u-|5z+d)3Bp~?r8*wu5=dZWA%VeP9Z#s2Ssld*^Nn)l0eNg`* zzI5xyp#_=k*?=?Cfs#TUoyq2Jip+^9$~!0o%56S**$BRprxUUFFpxzPXJLK+k{Pv- zpjbs%NpLz+3W#u4by*Mia~cUo%mBvxW+Ed4=<=Hh!c1U16xv_q9Y8#d1;YXWlUQK8 zS|E8{b1pCi7W1zZ=QxqGaUcM2_SeODaAq!$6nr}hL<1!#b=agaK)xwls$6kve}Ts! 
zRSVk|Q(~i&x0XFK{cNcigKaKwm8I;tg*DwB5EI@@!#raAHz;IS>Z64!Dw3NVSC)f6 zzY3=daz*w^Dt}5*Z1j|vzNQ1VV{-}bShmAUl-dh?Zds~T@xY*uuMyL)9|o!1M%Pxd z#iycC3V^l`6wHSK^GJCjge|L5Wm2nqaa@KHh2H0O6AyfOM0PIly6*^xS5To<zl86K8pK|o@zSDCa;{M8n zZ}(}|IfI(&Hov3&%CpYgi#kL$j3P`%*q* zc0$+1!LoSHMmp1{K64ZMCu_U+j|*GXmTmKcu*{+?2XX&8SYfjF+WivZje-yd{G1FV z24|K6kwGo$)(&%G7*FN8-zY3dnlNY5y*>rjqG5oGTnYW-cB$X=PXN`B)7@N{oMoQe zUF41hCUTjJOsu*WmyW8A-FZAG8dnvh%1R4X(m1kOh)uhoTezN2Uq(UUnZtwQtMGu_ zQJ=*{#0QVY1WT@>ve<5-%!g7wq6f7M3?EA0t%G(;=QFDhx4i&Y>$@&xaZx7+gx#qt z^;zytt5=G}eJyd0 zR_wmYbv*TGo^Xwi&eH>xrTy^WXyWYF-@pOEc$}6W_V4VgUtu!oBHwzSHB1a@F0Wy6 z*xR!?z-1gLU7YmzF$$Yx_G>ZIT{^nU=9q(ktD_P>X5xZHsfnnO30Kc@07*ZPK38KT zaa^p)eQGN7NMvfkH5UPIk2fR;OBfsvCm?+Dw1HRJvSGzuIb02 z_tQuW8>(4vX#=i~x z!cTI^j7jFB{q2T_87|M&%ZSGBF~M65M5c;q=#$g@(bdIFP1Xdi4}fu#GpuzW@h~pU zMy!D;(3j?^iKrW_HCj4qcA>=&t*s|EoxCX5MIYtKX@@R3ni=Gd2_q1KI0Gsy$3Tt| z=gCzw?m8-B{5hXnMb1@wK$PnKOOhn7IhmIED1eFp<(#V54JJ%gXB>6^hc4wv!YPegDN|M=PD(qHD~+=WhZZpfmzwT|1!XCSeSIDu+76Wz1*HH%MZvFE!7wkU>gsKy4?( z3NxeWIiqpfh=$3U`DpZNU*d}_DD{s1Ih1c$7j&qwdk_xi7p8wLFgW3%U%<+*(q=S# zWJuwZF8ocSHnEu+)6a0P}eRwd=prouPSJUsRu(u3h*#H5NtpVBx3l*HolgC`37 z(6FZPsqN5B-R;Yv<-iPGUDKhY?VFtG)h_3LmUrO>c&pp?hi0Ls3&)-10q5}7bLRw9 zD`X@-9XQnx`-S;eSC30TH))F5Yv=Ra%cCvm^PdrAbqxVuKBRdvFdtM!eK_akGi%;c zV-FF7>?j?76~*P9%74am18rXq)jh5bP@GI7WZ)1?w(VZH*cChq9!p)gHa$w_@;@Le z=JT{kC**j@zr`C;*Eq;@u|!P3qA{>@tB;ho7Dy4E(yv-eVmsF&GkBGuh~>=aqX}%X z6qO(tm9q0GWK^LLUL2#H$DAMH8gNG5Q%cIUe092nqUTo@Zz2gL0dnAcw=RCfoQIY~ zX?Rmss<76IxmNX+E^m!fRuf5e_#l*oa@42~9+!M{nTVe?jmQnmxR&9=zUaUF4fq z*WHjIn;_V>7DxvgLh~cX5WRjxGOP=E0CW5hTfGy1iHW)T5+#P{KCH3p@d-Z%d; zDgH&vSb-HX5G;ZOir)i4{&65YNHki?wvz$F|M&@4X>+0&DWDi#r3L5vxM~0%&~qVr z-Kt%(Qq0R6W36pE8-63Kh5LNOEe_^IB}EdJm#Cgnw0J|=!S$gqeo&Xv@m=d=L;;OR zte&299%A0N*VX5$+Z{^4%9@QlaZ=aI3J%Q3R<+ol+tj>71D_;Nzk&QwKufuaX;Csk z$Bd4Mhg8WV212rfET=>q7C!^im*kPTK|f`;oQ6kSUKhq^U!}|1`IlYM6$VcQj?w3^ zY1ef0VKUk97WOkusi!suZa`tZw5C{$R9WzSslj1?i)ITQGQ?*SwZyLa4I7$+g>%-~ zaPTYA&t8y-D#`O8p-PMkrQ*va^3+JY^`~QlGtTreyGhS_?oV4GM27R1 z?rU+@H>^Y>UX@mrJLuc9I4$Kj(0?|g$h8hxQOJC?3^E3P`&+n?<3KEsr?R%~90QK$ zSVev4MrbiLou^1>ch}C376$7@Vnw4hWObQ?ezvcOJGPnW939z?JezkJ4jxBA$>uD1 zZ_NlrneP#Wn1*4?7`Vw6*-f{rWZsmnq{^hegVG-OWQt*7QLAU z`XdULj#&TwUC+F#I^|TM!#+6`Z{XV||B6+36(aqo^GkflP@NuDzJtpRRI6GEWnIX` zSGho#@D#=7vO#9P`l>&{xusocoss**M(1`e*U8Eh0f{f2E%>bPSDj9s7-20? z-qjiwHq3-~4BbblgJxyLzB?A|_I~<0BYAV!S|C8Sy0g!9iJL)mG@!xdnjP16N>T%t zf8JB2a-b#DINRWppn0hDV3>(-DA0+CDUKApU7Tw$$(O2UVsr01&=*b8Nf1kmvWNvU zq@j$0Rm?_Y@cA=A|CK(?KGK%}`+ZY(pVa)@FZV2Dc=*x}&U&u3x1XZaaM3!PdWg)< zdedcJADen;HKs4soJAku?J?JC=Re2XKb~HHR7NXLjxelavAZ-qSyg%GC#HLQ{=U_- zz$iAR0OJJx&*rwt81D5A@*Nw4SDJwwASugr28`ByH54D8d~@x&B_udqO*8kM%ERw^ z@t@OhEv3XJ0Ks7qqe| zgoBE_$kUONuj)jvTc&=+ZVq17S=3&94>%=^4@5>G`6y(aRgcl$=Bs74>T^kJtAbCd z`aR{%2jJWfW%F4~Djiv>QpOC-3WGO+)rp$1ksnp)QVl!x8bFL_l~H~k54Wdi+?Dh! 
z^iD01V|j)$@S{@ubdhOO6LLyPZlFvjzU#g-?>Eo97{|pe`$gH|Ys?E$7Ch4N6)RcW z($5nt7QPE#?CaXQ zwa%}PUmeZqzYN#;4|GAU z+yelK;MG>3Gz|MaBLSGP4d@SYYv7Ggb)uVV9JmZ{76__Rh+++W{nO*&C(m&At1syxE{*+9WI?ZBX4D-&*y-q2^L z5XVt?VCW7Y9r<5t5>sDC-;kjI0D9N}03-?WKq~+1Aw~nsb^wXh{z8dyxFdgn^r^Fu z-E+iWssBM45&gZ7POoogCoctr!U5nw0Kb-d}!??d3F+$@1lWl?o+};5s z_|1Y}16UH$SyMxNS`q$1T;*-t3Ij11)CWZQg*=ZBi=9OP0Qw;e|5eIA^nNme`8$D> zzfk?rEVk6ICk-hU@wpRmPmBO#=}R3B+LOUoJxa;VWASEZqep|3#}k+xYA+pOktK z-u~q4Mg<(+^*3J)twI785M}U?|30Mf53R|M;Gr%cDd;bSpqh&?|Hae6|ED$!yZ;<) zFm(KNtl{5E$$PhhS_ScClJ@4`1tfd^C##XMox}f`x$}6QaQCm4@ETIpzs^wn!=7sb z*t;7@`m3sipDYV9A(FUw@o%vmg5zRxiT+XP7DA~h1*Az8LMlW22ffZ>Fmw;_%`bWq zp_7Ld5Ycr*kbf$kq#7*U^S9!C>l;MwNCAKaW&i+kjpcuMdu#+p_5jI#(OS2*5g~`D zG#Ju|Ano#hP%me}!#zN%U#QnEck)V*=3xbCm=Iz859;$O7`+$x^5b7>a0sKiS%gU9 zBLs!^2MVzDA6Z&K+yxsL>s$X@52-#^t#uGS5dOcKBksld+>-v15yfAnw9xOpmxmy; zARPbMyl&3`;JIGli(fGE_v?8wko0Pje+%999snll1HSxKvwyip{hyjWpn`S#fNy`H z{&6z*Z;5Z*0K4|ohSfKU5_5(oDU;6s*#>)TK?@?L;!Q>G@BJjWf5akyZ zd)Eyi71DVgLmJzkpGl1MFDxM#eGrKL3ybqem#qj9a}Xqu{~>N;+rP2ugFu|$E7*2} zKqBZ%Nc?jQWMQ&!@bB_G<@U9R7nJE2>c6ve$iD;1KTy+AzX?Xy{vRy=G5P%?+kEuzY>>Gy_OBBDn9BVDm5coi zh0N@J_v-&0hC}+8_kTc_;{OXp0H2Hl34f9P?>GQ6Zrl&7n**0=aLYT is*vz!Td`02FBAqG`5lOX#tq;G$U^@2+$H^`hyMqf_sqQj diff --git a/rebar.config b/rebar.config index 3ee27e69f7..d2924d233c 100644 --- a/rebar.config +++ b/rebar.config @@ -1,7 +1,7 @@ -{require_otp_vsn, "R13B04|R14"}. {cover_enabled, true}. {edoc_opts, [{preprocess, true}]}. {erl_opts, [warnings_as_errors, {parse_transform, lager_transform}]}. +{eunit_opts, [verbose]}. {erl_first_files, [ "src/riak_kv_backend.erl", @@ -10,9 +10,6 @@ {deps, [ {riak_core, ".*", {git, "git://github.com/basho/riak_core", "master"}}, - {riakc, ".*", {git, "git://github.com/basho/riak-erlang-client", - "master"}}, - {luke, ".*", {git, "git://github.com/basho/luke", "master"}}, {erlang_js, ".*", {git, "git://github.com/basho/erlang_js", "master"}}, {bitcask, ".*", {git, "git://github.com/basho/bitcask", "master"}}, {merge_index, ".*", {git, "git://github.com/basho/merge_index", @@ -24,6 +21,5 @@ {sext, ".*", {git, "git://github.com/esl/sext", "master"}}, {riak_pipe, ".*", {git, "git://github.com/basho/riak_pipe.git", "master"}}, - {basho_metrics, ".*", {git, "git://github.com/basho/basho_metrics.git", - "master"}} + {riak_api, ".*", {git, "git://github.com/basho/riak_api.git", "master"}} ]}. diff --git a/src/lk.erl b/src/lk.erl deleted file mode 100644 index eda7232903..0000000000 --- a/src/lk.erl +++ /dev/null @@ -1,46 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% lk: Helper functions for list keys -%% -%% Copyright (c) 2007-2010 Basho Technologies, Inc. All Rights Reserved. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- --module(lk). - --export([fsm/1]). 
-
-fsm(Bucket) ->
-    ReqId = random:uniform(10000),
-    Start = erlang:now(),
-    riak_kv_keys_fsm:start_link(ReqId, Bucket, 60000, plain, 0.0001, self()),
-    {ok, Count} = gather_fsm_results(ReqId, 0),
-    End = erlang:now(),
-    Ms = erlang:round(timer:now_diff(End, Start) / 1000),
-    io:format("Found ~p keys in ~pms.~n", [Count, Ms]).
-
-gather_fsm_results(ReqId, Count) ->
-    receive
-        {ReqId, From, {keys, Keys}} ->
-            riak_kv_keys_fsm:ack_keys(From),
-            gather_fsm_results(ReqId, Count + length(Keys));
-        {ReqId, {keys, Keys}} ->
-            gather_fsm_results(ReqId, Count + length(Keys));
-        {ReqId, done} ->
-            {ok, Count}
-    after 120000 ->
-            {error, timeout}
-    end.
diff --git a/src/riak.erl b/src/riak.erl
index c17277ee2d..1e70386505 100644
--- a/src/riak.erl
+++ b/src/riak.erl
@@ -107,8 +107,7 @@ client_connect(Node, ClientId= <<_:32>>) ->
     %% or the new vnode based vclocks should be used.
     %% N.B. all nodes must be upgraded to 1.0 before
     %% this can be enabled.
-    case rpc:call(Node, app_helper, get_env,
-                  [riak_kv, vnode_vclocks, false]) of
+    case vnode_vclocks(Node) of
         {badrpc, _Reason} ->
             {error, {could_not_reach_node, Node}};
         true ->
@@ -122,6 +121,16 @@ client_connect(Node, undefined) ->
 client_connect(Node, Other) ->
     client_connect(Node, <<(erlang:phash2(Other)):32>>).
 
+vnode_vclocks(Node) ->
+    case rpc:call(Node, riak_core_capability, get,
+                  [{riak_kv, vnode_vclocks}]) of
+        {badrpc, {'EXIT', {undef, _}}} ->
+            rpc:call(Node, app_helper, get_env,
+                     [riak_kv, vnode_vclocks, false]);
+        Result ->
+            Result
+    end.
+
 %%
 %% @doc Validate that a specified node is accessible and functional.
 %%
diff --git a/src/riak_client.erl b/src/riak_client.erl
index 0c881c429d..ddc77019a4 100644
--- a/src/riak_client.erl
+++ b/src/riak_client.erl
@@ -25,25 +25,18 @@
 -module(riak_client, [Node,ClientId]).
 -author('Justin Sheehy <justin@basho.com>').
 
--export([mapred/2,mapred/3,mapred/4]).
--export([mapred_stream/2,mapred_stream/3,mapred_stream/4]).
--export([mapred_bucket/2,mapred_bucket/3,mapred_bucket/4]).
--export([mapred_bucket_stream/3,mapred_bucket_stream/4,mapred_bucket_stream/5,
-         mapred_bucket_stream/6]).
--export([mapred_dynamic_inputs_stream/3]).
 -export([get/2, get/3,get/4]).
 -export([put/1, put/2,put/3,put/4,put/5]).
 -export([delete/2,delete/3,delete/4]).
 -export([delete_vclock/3,delete_vclock/4,delete_vclock/5]).
 -export([list_keys/1,list_keys/2,list_keys/3]).
--export([stream_list_keys/1,stream_list_keys/2,stream_list_keys/3,
-         stream_list_keys/4,stream_list_keys/5]).
+-export([stream_list_keys/1,stream_list_keys/2,stream_list_keys/3]).
 -export([filter_buckets/1]).
 -export([filter_keys/2,filter_keys/3]).
 -export([list_buckets/0,list_buckets/2]).
 -export([get_index/3,get_index/2]).
 -export([stream_get_index/3,stream_get_index/2]).
--export([set_bucket/2,get_bucket/1]).
+-export([set_bucket/2,get_bucket/1,reset_bucket/1]).
 -export([reload_all/1]).
 -export([remove_from_cluster/1]).
 -export([get_stats/1]).
@@ -56,158 +49,6 @@
 
 -type riak_client() :: term().
 
-%% @spec mapred(Inputs :: riak_kv_mapred_term:mapred_inputs(),
-%%              Query :: [riak_kv_mapred_query:mapred_queryterm()]) ->
-%%       {ok, riak_kv_mapred_query:mapred_result()} |
-%%       {error, {bad_qterm, riak_kv_mapred_query:mapred_queryterm()}} |
-%%       {error, timeout} |
-%%       {error, Err :: term()}
-%% @doc Perform a map/reduce job across the cluster.
-%%      See the map/reduce documentation for explanation of behavior.
-%% @equiv mapred(Inputs, Query, default_timeout())
-mapred(Inputs,Query) -> mapred(Inputs,Query,?DEFAULT_TIMEOUT).
- -%% @spec mapred(Inputs :: riak_kv_mapred_term:mapred_inputs(), -%% Query :: [riak_kv_mapred_query:mapred_queryterm()], -%% TimeoutMillisecs :: integer() | 'infinity') -> -%% {ok, riak_kv_mapred_query:mapred_result()} | -%% {error, {bad_qterm, riak_kv_mapred_query:mapred_queryterm()}} | -%% {error, timeout} | -%% {error, Err :: term()} -%% @doc Perform a map/reduce job across the cluster. -%% See the map/reduce documentation for explanation of behavior. -mapred(Inputs,Query,Timeout) -> - mapred(Inputs,Query,undefined,Timeout). - -%% @spec mapred(Inputs :: riak_kv_mapred_term:mapred_inputs(), -%% Query :: [riak_kv_mapred_query:mapred_queryterm()], -%% TimeoutMillisecs :: integer() | 'infinity', -%% ResultTransformer :: function()) -> -%% {ok, riak_kv_mapred_query:mapred_result()} | -%% {error, {bad_qterm, riak_kv_mapred_query:mapred_queryterm()}} | -%% {error, timeout} | -%% {error, Err :: term()} -%% @doc Perform a map/reduce job across the cluster. -%% See the map/reduce documentation for explanation of behavior. -mapred(Inputs,Query,ResultTransformer,Timeout) when is_binary(Inputs) orelse - is_tuple(Inputs) -> - case is_binary(Inputs) orelse is_key_filter(Inputs) of - true -> - mapred_bucket(Inputs, Query, ResultTransformer, Timeout); - false -> - Me = self(), - case mapred_stream(Query,Me,ResultTransformer,Timeout) of - {ok, {ReqId, FlowPid}} -> - mapred_dynamic_inputs_stream(FlowPid, Inputs, Timeout), - luke_flow:finish_inputs(FlowPid), - luke_flow:collect_output(ReqId, Timeout); - Error -> - Error - end - end; -mapred(Inputs,Query,ResultTransformer,Timeout) - when is_list(Query), - (is_integer(Timeout) orelse Timeout =:= infinity) -> - Me = self(), - case mapred_stream(Query,Me,ResultTransformer,Timeout) of - {ok, {ReqId, FlowPid}} -> - case is_list(Inputs) of - true -> - add_inputs(FlowPid, Inputs); - false -> - mapred_dynamic_inputs_stream(FlowPid, Inputs, Timeout) - end, - luke_flow:finish_inputs(FlowPid), - luke_flow:collect_output(ReqId, Timeout); - Error -> - Error - end. - -%% @spec mapred_stream(Query :: [riak_kv_mapred_query:mapred_queryterm()], -%% ClientPid :: pid()) -> -%% {ok, {ReqId :: term(), MR_FSM_PID :: pid()}} | -%% {error, {bad_qterm, riak_kv_mapred_query:mapred_queryterm()}} | -%% {error, Err :: term()} -%% @doc Perform a streaming map/reduce job across the cluster. -%% See the map/reduce documentation for explanation of behavior. -mapred_stream(Query,ClientPid) -> - mapred_stream(Query,ClientPid,?DEFAULT_TIMEOUT). - -%% @spec mapred_stream(Query :: [riak_kv_mapred_query:mapred_queryterm()], -%% ClientPid :: pid(), -%% TimeoutMillisecs :: integer() | 'infinity') -> -%% {ok, {ReqId :: term(), MR_FSM_PID :: pid()}} | -%% {error, {bad_qterm, riak_kv_mapred_query:mapred_queryterm()}} | -%% {error, Err :: term()} -%% @doc Perform a streaming map/reduce job across the cluster. -%% See the map/reduce documentation for explanation of behavior. -mapred_stream(Query, ClientPid, Timeout) -> - mapred_stream(Query, ClientPid, undefined, Timeout). - -%% @spec mapred_stream(Query :: [riak_kv_mapred_query:mapred_queryterm()], -%% ClientPid :: pid(), -%% TimeoutMillisecs :: integer() | 'infinity', -%% ResultTransformer :: function()) -> -%% {ok, {ReqId :: term(), MR_FSM_PID :: pid()}} | -%% {error, {bad_qterm, riak_kv_mapred_query:mapred_queryterm()}} | -%% {error, Err :: term()} -%% @doc Perform a streaming map/reduce job across the cluster. -%% See the map/reduce documentation for explanation of behavior. 
-mapred_stream(Query,ClientPid,ResultTransformer,Timeout) - when is_list(Query), is_pid(ClientPid), - (is_integer(Timeout) orelse Timeout =:= infinity) -> - ReqId = mk_reqid(), - case riak_kv_mapred_query:start(Node, ClientPid, ReqId, Query, ResultTransformer, Timeout) of - {ok, Pid} -> - {ok, {ReqId, Pid}}; - Error -> - Error - end. - -mapred_bucket_stream(Bucket, Query, ClientPid) -> - mapred_bucket_stream(Bucket, Query, ClientPid, ?DEFAULT_TIMEOUT). - -mapred_bucket_stream(Bucket, Query, ClientPid, Timeout) -> - mapred_bucket_stream(Bucket, Query, ClientPid, undefined, Timeout). - -mapred_bucket_stream(Bucket, Query, ClientPid, ResultTransformer, Timeout) -> - {ok,{MR_ReqId,MR_FSM}} = mapred_stream(Query,ClientPid,ResultTransformer,Timeout), - {ok,_Stream_ReqID} = stream_list_keys(Bucket, Timeout, - MR_FSM, mapred), - {ok,MR_ReqId}. - - -%% @deprecated Only in place for backwards compatibility. -mapred_bucket_stream(Bucket, Query, ClientPid, ResultTransformer, Timeout, _) -> - mapred_bucket_stream(Bucket, Query, ClientPid, ResultTransformer, Timeout). - -mapred_bucket(Bucket, Query) -> - mapred_bucket(Bucket, Query, ?DEFAULT_TIMEOUT). - -mapred_bucket(Bucket, Query, Timeout) -> - mapred_bucket(Bucket, Query, undefined, Timeout). - -mapred_bucket(Bucket, Query, ResultTransformer, Timeout) -> - Me = self(), - {ok,MR_ReqId} = mapred_bucket_stream(Bucket, Query, Me, - ResultTransformer, Timeout), - luke_flow:collect_output(MR_ReqId, Timeout). - --define(PRINT(Var), io:format("DEBUG: ~p:~p - ~p~n~n ~p~n~n", [?MODULE, ?LINE, ??Var, Var])). - -%% An InputDef defines a Module and Function to call to generate -%% inputs for a map/reduce job. Should return {ok, -%% LukeReqID}. Ideally, we'd combine both the other input types (BKeys -%% and Bucket) into this approach, but postponing until after a code -%% review of Map/Reduce. -mapred_dynamic_inputs_stream(FSMPid, InputDef, Timeout) -> - case InputDef of - {modfun, Mod, Fun, Options} -> - Mod:Fun(FSMPid, Options, Timeout); - _ -> - throw({invalid_inputdef, InputDef}) - end. - %% @spec get(riak_object:bucket(), riak_object:key()) -> %% {ok, riak_object:riak_object()} | %% {error, notfound} | @@ -218,7 +59,7 @@ mapred_dynamic_inputs_stream(FSMPid, InputDef, Timeout) -> %% @doc Fetch the object at Bucket/Key. Return a value as soon as the default %% R-value for the nodes have responded with a value or error. %% @equiv get(Bucket, Key, R, default_timeout()) -get(Bucket, Key) -> +get(Bucket, Key) -> get(Bucket, Key, []). %% @spec get(riak_object:bucket(), riak_object:key(), options()) -> @@ -249,7 +90,7 @@ get(Bucket, Key, Options) when is_list(Options) -> %% @doc Fetch the object at Bucket/Key. Return a value as soon as R %% nodes have responded with a value or error. %% @equiv get(Bucket, Key, R, default_timeout()) -get(Bucket, Key, R) -> +get(Bucket, Key, R) -> get(Bucket, Key, [{r, R}]). %% @spec get(riak_object:bucket(), riak_object:key(), R :: integer(), @@ -462,15 +303,6 @@ list_keys(Bucket) -> list_keys(Bucket, Timeout) -> list_keys(Bucket, none, Timeout). -%% @deprecated Only in place for backwards compatibility. -list_keys(Bucket, Timeout, ErrorTolerance) when is_integer(Timeout) -> - %% @TODO This code is only here to support - %% rolling upgrades and will be removed. 
- Me = self(), - ReqId = mk_reqid(), - FSM_Timeout = trunc(Timeout / 8), - riak_kv_keys_fsm_legacy_sup:start_keys_fsm(Node, [ReqId, Bucket, FSM_Timeout, plain, ErrorTolerance, Me]), - wait_for_listkeys(ReqId, Timeout); %% @spec list_keys(riak_object:bucket(), TimeoutMillisecs :: integer()) -> %% {ok, [Key :: riak_object:key()]} | %% {error, timeout} | @@ -478,18 +310,11 @@ list_keys(Bucket, Timeout, ErrorTolerance) when is_integer(Timeout) -> %% @doc List the keys known to be present in Bucket. %% Key lists are updated asynchronously, so this may be slightly %% out of date if called immediately after a put or delete. -list_keys(Bucket, Filter, Timeout) -> - case app_helper:get_env(riak_kv, legacy_keylisting) of - true -> - %% @TODO This code is only here to support - %% rolling upgrades and will be removed. - list_keys(Bucket, Timeout, ?DEFAULT_ERRTOL); - _ -> - Me = self(), - ReqId = mk_reqid(), - riak_kv_keys_fsm_sup:start_keys_fsm(Node, [{raw, ReqId, Me}, [Bucket, Filter, Timeout, plain]]), - wait_for_listkeys(ReqId, Timeout) - end. +list_keys(Bucket, Filter, Timeout) -> + Me = self(), + ReqId = mk_reqid(), + riak_kv_keys_fsm_sup:start_keys_fsm(Node, [{raw, ReqId, Me}, [Bucket, Filter, Timeout]]), + wait_for_listkeys(ReqId, Timeout). stream_list_keys(Bucket) -> stream_list_keys(Bucket, ?DEFAULT_TIMEOUT). @@ -498,27 +323,9 @@ stream_list_keys(Bucket, Timeout) -> Me = self(), stream_list_keys(Bucket, Timeout, Me). -stream_list_keys(Bucket, Timeout, Client) when is_pid(Client) -> - stream_list_keys(Bucket, Timeout, Client, plain); -%% @deprecated Only in place for backwards compatibility. -stream_list_keys(Bucket, Timeout, _) -> - stream_list_keys(Bucket, Timeout). - -%% @deprecated Only in place for backwards compatibility. -stream_list_keys(Bucket0, Timeout, ErrorTolerance, Client, ClientType) -> - ReqId = mk_reqid(), - case build_filter(Bucket0) of - {ok, Filter} -> - riak_kv_keys_fsm_legacy_sup:start_keys_fsm(Node, [ReqId, Filter, Timeout, ClientType, ErrorTolerance, Client]), - {ok, ReqId}; - Error -> - Error - end. - %% @spec stream_list_keys(riak_object:bucket(), %% TimeoutMillisecs :: integer(), -%% Client :: pid(), -%% ClientType :: atom()) -> +%% Client :: pid()) -> %% {ok, ReqId :: term()} %% @doc List the keys known to be present in Bucket. %% Key lists are updated asynchronously, so this may be slightly @@ -528,45 +335,31 @@ stream_list_keys(Bucket0, Timeout, ErrorTolerance, Client, ClientType) -> %% and a final {ReqId, done} message. %% None of the Keys lists will be larger than the number of %% keys in Bucket on any single vnode. -%% If ClientType is set to 'mapred' instead of 'plain', then the -%% messages will be sent in the form of a MR input stream. -stream_list_keys(Input, Timeout, Client, ClientType) when is_pid(Client) -> - case app_helper:get_env(riak_kv, legacy_keylisting) of - true -> - %% @TODO This code is only here to support - %% rolling upgrades and will be removed. 
- stream_list_keys(Input, Timeout, ?DEFAULT_ERRTOL, Client, ClientType); - _ -> - ReqId = mk_reqid(), - case Input of - {Bucket, FilterInput} -> - case riak_kv_mapred_filter:build_filter(FilterInput) of - {error, _Error} -> - {error, _Error}; - {ok, FilterExprs} -> - riak_kv_keys_fsm_sup:start_keys_fsm(Node, - [{raw, - ReqId, - Client}, - [Bucket, - FilterExprs, - Timeout, - ClientType]]), - {ok, ReqId} - end; - Bucket -> - riak_kv_keys_fsm_sup:start_keys_fsm(Node, - [{raw, ReqId, Client}, +stream_list_keys(Input, Timeout, Client) when is_pid(Client) -> + ReqId = mk_reqid(), + case Input of + {Bucket, FilterInput} -> + case riak_kv_mapred_filters:build_filter(FilterInput) of + {error, _Error} -> + {error, _Error}; + {ok, FilterExprs} -> + riak_kv_keys_fsm_sup:start_keys_fsm(Node, + [{raw, + ReqId, + Client}, [Bucket, - none, - Timeout, - ClientType]]), + FilterExprs, + Timeout]]), {ok, ReqId} - end - end; -%% @deprecated Only in place for backwards compatibility. -stream_list_keys(Bucket, Timeout, ErrorTolerance, Client) -> - stream_list_keys(Bucket, Timeout, ErrorTolerance, Client, plain). + end; + Bucket -> + riak_kv_keys_fsm_sup:start_keys_fsm(Node, + [{raw, ReqId, Client}, + [Bucket, + none, + Timeout]]), + {ok, ReqId} + end. %% @spec filter_keys(riak_object:bucket(), Fun :: function()) -> %% {ok, [Key :: riak_object:key()]} | @@ -578,14 +371,7 @@ stream_list_keys(Bucket, Timeout, ErrorTolerance, Client) -> %% out of date if called immediately after a put or delete. %% @equiv filter_keys(Bucket, Fun, default_timeout()) filter_keys(Bucket, Fun) -> - case app_helper:get_env(riak_kv, legacy_keylisting) of - true -> - %% @TODO This code is only here to support - %% rolling upgrades and will be removed. - list_keys({filter, Bucket, Fun}, ?DEFAULT_TIMEOUT*8); - _ -> - list_keys(Bucket, Fun, ?DEFAULT_TIMEOUT) - end. + list_keys(Bucket, Fun, ?DEFAULT_TIMEOUT). %% @spec filter_keys(riak_object:bucket(), Fun :: function(), TimeoutMillisecs :: integer()) -> %% {ok, [Key :: riak_object:key()]} | @@ -596,14 +382,7 @@ filter_keys(Bucket, Fun) -> %% Key lists are updated asynchronously, so this may be slightly %% out of date if called immediately after a put or delete. filter_keys(Bucket, Fun, Timeout) -> - case app_helper:get_env(riak_kv, legacy_keylisting) of - true -> - %% @TODO This code is only here to support - %% rolling upgrades and will be removed. - list_keys({filter, Bucket, Fun}, Timeout); - _ -> - list_keys(Bucket, Fun, Timeout) - end. + list_keys(Bucket, Fun, Timeout). %% @spec list_buckets() -> %% {ok, [Bucket :: riak_object:bucket()]} | @@ -628,17 +407,10 @@ list_buckets() -> %% either adds the first key or removes the last remaining key from %% a bucket. list_buckets(Filter, Timeout) -> - case app_helper:get_env(riak_kv, legacy_keylisting) of - true -> - %% @TODO This code is only here to support - %% rolling upgrades and will be removed. - list_keys('_', Timeout); - _ -> - Me = self(), - ReqId = mk_reqid(), - riak_kv_buckets_fsm_sup:start_buckets_fsm(Node, [{raw, ReqId, Me}, [Filter, Timeout, plain]]), - wait_for_listbuckets(ReqId, Timeout) - end. + Me = self(), + ReqId = mk_reqid(), + riak_kv_buckets_fsm_sup:start_buckets_fsm(Node, [{raw, ReqId, Me}, [Filter, Timeout]]), + wait_for_listbuckets(ReqId, Timeout). %% @spec filter_buckets(Fun :: function()) -> %% {ok, [Bucket :: riak_object:bucket()]} | @@ -646,14 +418,7 @@ list_buckets(Filter, Timeout) -> %% {error, Err :: term()} %% @doc Return a list of filtered buckets. 
filter_buckets(Fun) -> - case app_helper:get_env(riak_kv, legacy_keylisting) of - true -> - %% @TODO This code is only here to support - %% rolling upgrades and will be removed. - list_keys('_', ?DEFAULT_TIMEOUT); - _ -> - list_buckets(Fun, ?DEFAULT_TIMEOUT) - end. + list_buckets(Fun, ?DEFAULT_TIMEOUT). %% @spec get_index(Bucket :: binary(), %% Query :: riak_index:query_def()) -> @@ -676,7 +441,7 @@ get_index(Bucket, Query) -> get_index(Bucket, Query, Timeout) -> Me = self(), ReqId = mk_reqid(), - riak_kv_index_fsm_sup:start_index_fsm(Node, [{raw, ReqId, Me}, [Bucket, none, Query, Timeout, plain]]), + riak_kv_index_fsm_sup:start_index_fsm(Node, [{raw, ReqId, Me}, [Bucket, none, Query, Timeout]]), wait_for_query_results(ReqId, Timeout). %% @spec stream_get_index(Bucket :: binary(), @@ -700,7 +465,7 @@ stream_get_index(Bucket, Query) -> stream_get_index(Bucket, Query, Timeout) -> Me = self(), ReqId = mk_reqid(), - riak_kv_index_fsm_sup:start_index_fsm(Node, [{raw, ReqId, Me}, [Bucket, none, Query, Timeout, plain]]), + riak_kv_index_fsm_sup:start_index_fsm(Node, [{raw, ReqId, Me}, [Bucket, none, Query, Timeout]]), {ok, ReqId}. %% @spec set_bucket(riak_object:bucket(), [BucketProp :: {atom(),term()}]) -> ok @@ -715,6 +480,10 @@ set_bucket(BucketName,BucketProps) -> %% See riak_core_bucket for expected useful properties. get_bucket(BucketName) -> rpc:call(Node,riak_core_bucket,get_bucket,[BucketName]). +%% @spec reset_bucket(riak_object:bucket()) -> ok +%% @doc Reset properties for this Bucket to the default values +reset_bucket(BucketName) -> + rpc:call(Node,riak_core_bucket,reset_bucket,[BucketName]). %% @spec reload_all(Module :: atom()) -> term() %% @doc Force all Riak nodes to reload Module. %% This is used when loading new modules for map/reduce functionality. @@ -746,7 +515,8 @@ for_dialyzer_only_ignore(X, Y) -> ?MODULE:new(X, Y). %% @private -mk_reqid() -> erlang:phash2(erlang:now()). % only has to be unique per-pid +mk_reqid() -> + erlang:phash2({self(), os:timestamp()}). % only has to be unique per-pid %% @private wait_for_reqid(ReqId, Timeout) -> @@ -774,7 +544,7 @@ wait_for_listkeys(ReqId,Timeout,Acc) -> %% @private wait_for_listbuckets(ReqId, Timeout) -> - receive + receive {ReqId,{buckets, Buckets}} -> {ok, Buckets}; {ReqId, Error} -> {error, Error} after Timeout -> @@ -791,36 +561,9 @@ wait_for_query_results(ReqId, Timeout, Acc) -> {ReqId,{results, Res}} -> wait_for_query_results(ReqId, Timeout, [Res | Acc]); {ReqId, Error} -> {error, Error} after Timeout -> - {error, timeout, Acc} + {error, timeout} end. -add_inputs(_FlowPid, []) -> - ok; -add_inputs(FlowPid, Inputs) when length(Inputs) < 100 -> - luke_flow:add_inputs(FlowPid, Inputs); - add_inputs(FlowPid, Inputs) -> - {Current, Next} = lists:split(100, Inputs), - luke_flow:add_inputs(FlowPid, Current), - add_inputs(FlowPid, Next). - -is_key_filter({Bucket, Filters}) when is_binary(Bucket), - is_list(Filters) -> - true; -is_key_filter(_) -> - false. - -%% @deprecated This function is only here to support -%% rolling upgrades and will be removed. -build_filter({Bucket, Exprs}) -> - case riak_kv_mapred_filters:build_filter(Exprs) of - {ok, Filters} -> - {ok, {Bucket, Filters}}; - Error -> - Error - end; -build_filter(Bucket) when is_binary(Bucket) -> - {ok, {Bucket, []}}. 
- recv_timeout(Options) -> case proplists:get_value(recv_timeout, Options) of undefined -> diff --git a/src/riak_index.erl b/src/riak_index.erl index 242184101f..ac72c75da5 100644 --- a/src/riak_index.erl +++ b/src/riak_index.erl @@ -31,7 +31,8 @@ parse_fields/1, format_failure_reason/1, normalize_index_field/1, - timestamp/0 + timestamp/0, + to_index_query/2 ]). -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). @@ -56,9 +57,6 @@ mapred_index(Dest, Args) -> mapred_index(Dest, Args, ?TIMEOUT). -mapred_index(FlowPid, [_Bucket, _Query], _Timeout) - when is_pid(FlowPid) -> - throw({not_supported, mapred_index, FlowPid}); mapred_index(_Pipe, [Bucket, Query], Timeout) -> {ok, C} = riak:local_client(), {ok, ReqId} = C:stream_get_index(Bucket, Query, Timeout), @@ -224,9 +222,43 @@ format_failure_reason(FailureReason) -> %% @doc Get a timestamp, the number of milliseconds returned by %% erlang:now(). timestamp() -> - {MegaSeconds,Seconds,MilliSeconds}=erlang:now(), + {MegaSeconds,Seconds,MilliSeconds}=os:timestamp(), (MegaSeconds * 1000000000000) + (Seconds * 1000000) + MilliSeconds. +%% @spec to_index_query(binary(), [binary()]) -> +%% {ok, {atom(), binary(), list(binary())}} | {error, Reasons}. +%% @doc Given an IndexOp, IndexName, and Args, construct and return a +%% valid query, or a list of errors if the query is malformed. +to_index_query(IndexField, Args) -> + %% Normalize the index field... + IndexField1 = riak_index:normalize_index_field(IndexField), + + %% Normalize the arguments... + case riak_index:parse_fields([{IndexField1, X} || X <- Args]) of + {ok, []} -> + {error, {too_few_arguments, Args}}; + + {ok, [{_, Value}]} -> + %% One argument == exact match query + {ok, {eq, IndexField1, Value}}; + + {ok, [{_, Start}, {_, End}]} -> + %% Two arguments == range query + case End > Start of + true -> + {ok, {range, IndexField1, Start, End}}; + false -> + {error, {invalid_range, Args}} + end; + + {ok, _} -> + {error, {too_many_arguments, Args}}; + + {error, FailureReasons} -> + {error, FailureReasons} + end. + + %% @spec field_types() -> data_type_defs(). %% %% @doc Return a list of {MatchFunction, ParseFunction} tuples that diff --git a/src/riak_kv.app.src b/src/riak_kv.app.src index 2273e7c4ed..ee28363a04 100644 --- a/src/riak_kv.app.src +++ b/src/riak_kv.app.src @@ -3,14 +3,14 @@ {application, riak_kv, [ {description, "Riak Key/Value Store"}, - {vsn, "1.1.1"}, + {vsn, "1.2.1"}, {applications, [ kernel, stdlib, sasl, crypto, + riak_api, riak_core, - luke, erlang_js, mochiweb, webmachine, @@ -26,13 +26,6 @@ %% Secondary code paths {add_paths, []}, - %% This option enables compatability of bucket and key listing - %% with 0.14 and earlier versions. Once a rolling upgrade to - %% a version > 0.14 is completed for a cluster, this should be - %% set to false for improved performance for bucket and key - %% listing operations. - {legacy_keylisting, true}, - %% This option toggles compatibility of keylisting with 1.0 %% and earlier versions. Once a rolling upgrade to a version %% > 1.0 is completed for a cluster, this should be set to diff --git a/src/riak_kv_app.erl b/src/riak_kv_app.erl index 676db5c377..25fe40e5d6 100644 --- a/src/riak_kv_app.erl +++ b/src/riak_kv_app.erl @@ -23,7 +23,14 @@ -module(riak_kv_app). -behaviour(application). --export([start/2,stop/1]). +-export([start/2, prep_stop/1, stop/1]). 
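
Stepping back to the to_index_query/2 helper added to riak_index above, the three interesting outcomes look like this (field name and values are invented, and it is assumed per parse_fields/1 that the _int suffix parses values to integers; note that the strict End > Start check also rejects equal endpoints):

{ok, {eq, <<"age_int">>, 25}} =
    riak_index:to_index_query(<<"age_int">>, [<<"25">>]),
{ok, {range, <<"age_int">>, 20, 30}} =
    riak_index:to_index_query(<<"age_int">>, [<<"20">>, <<"30">>]),
{error, {invalid_range, [<<"30">>, <<"20">>]}} =
    riak_index:to_index_query(<<"age_int">>, [<<"30">>, <<"20">>]).
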
+ +-define(SERVICES, [{riak_kv_pb_object, 3, 6}, %% ClientID stuff + {riak_kv_pb_object, 9, 14}, %% Object requests + {riak_kv_pb_bucket, 15, 22}, %% Bucket requests + {riak_kv_pb_mapred, 23, 24}, %% MapReduce requests + {riak_kv_pb_index, 25, 26} %% Secondary index requests + ]). %% @spec start(Type :: term(), StartArgs :: term()) -> %% {ok,Pid} | ignore | {error,Error} @@ -82,14 +89,59 @@ start(_Type, _StartArgs) -> %% Spin up supervisor case riak_kv_sup:start_link() of {ok, Pid} -> + %% Register capabilities + riak_core_capability:register({riak_kv, vnode_vclocks}, + [true, false], + false, + {riak_kv, + vnode_vclocks, + [{true, true}, {false, false}]}), + + riak_core_capability:register({riak_kv, legacy_keylisting}, + [false], + false, + {riak_kv, + legacy_keylisting, + [{false, false}]}), + + riak_core_capability:register({riak_kv, listkeys_backpressure}, + [true, false], + false, + {riak_kv, + listkeys_backpressure, + [{true, true}, {false, false}]}), + + riak_core_capability:register({riak_kv, index_backpressure}, + [true, false], + false), + + %% mapred_system should remain until no nodes still exist + %% that would propose 'legacy' as the default choice + riak_core_capability:register({riak_kv, mapred_system}, + [pipe], + pipe, + {riak_kv, + mapred_system, + [{pipe, pipe}]}), + + riak_core_capability:register({riak_kv, mapred_2i_pipe}, + [true, false], + false, + {riak_kv, + mapred_2i_pipe, + [{true, true}, {false, false}]}), + %% Go ahead and mark the riak_kv service as up in the node watcher. %% The riak_core_ring_handler blocks until all vnodes have been started %% synchronously. riak_core:register(riak_kv, [ {vnode_module, riak_kv_vnode}, - {bucket_validator, riak_kv_bucket} + {bucket_validator, riak_kv_bucket}, + {stat_mod, riak_kv_stat} ]), + ok = riak_api_pb_service:register(?SERVICES), + %% Add routes to webmachine [ webmachine_router:add_route(R) || R <- lists:reverse(riak_kv_web:dispatch_table()) ], @@ -98,9 +150,30 @@ start(_Type, _StartArgs) -> {error, Reason} end. +%% @doc Prepare to stop - called before the supervisor tree is shutdown +prep_stop(_State) -> + try %% wrap with a try/catch - application carries on regardless, + %% no error message or logging about the failure otherwise. + + lager:info("Stopping application riak_kv - marked service down.\n", []), + riak_core_node_watcher:service_down(riak_kv) + + %% TODO: Gracefully unregister riak_kv webmachine endpoints. + %% Cannot do this currently as it calls application:set_env while this function + %% is itself inside of application controller. webmachine really needs it's own + %% ETS table for dispatch information. + %%[ webmachine_router:remove_route(R) || R <- riak_kv_web:dispatch_table() ], + catch + Type:Reason -> + lager:error("Stopping application riak_api - ~p:~p.\n", [Type, Reason]) + end, + stopping. + %% @spec stop(State :: term()) -> ok %% @doc The application:stop callback for riak. stop(_State) -> + ok = riak_api_pb_service:deregister(?SERVICES), + lager:info("Stopped application riak_kv.\n", []), ok. %% 719528 days from Jan 1, 0 to Jan 1, 1970 @@ -113,7 +186,7 @@ check_epoch() -> %% doc for erlang:now/0 says return value is platform-dependent %% -> let's emit an error if this platform doesn't think the epoch %% is Jan 1, 1970 - {MSec, Sec, _} = erlang:now(), + {MSec, Sec, _} = os:timestamp(), GSec = calendar:datetime_to_gregorian_seconds( calendar:universal_time()), case GSec - ((MSec*1000000)+Sec) of @@ -127,4 +200,3 @@ check_epoch() -> "but your system says the epoch is ~p", [Epoch]), ok end. 
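
The capabilities registered above are consumed elsewhere with riak_core_capability:get/2 (as riak_kv_index_fsm does further down), which resolves to a mode supported by every node in the cluster or falls back to the supplied default. A sketch, with invented branch helpers:

case riak_core_capability:get({riak_kv, listkeys_backpressure}, false) of
    true  -> listkeys_with_backpressure();  %% invented helper
    false -> listkeys_legacy()              %% invented helper
end.
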
- diff --git a/src/riak_kv_bitcask_backend.erl b/src/riak_kv_bitcask_backend.erl index d83138ecba..280b97bc89 100644 --- a/src/riak_kv_bitcask_backend.erl +++ b/src/riak_kv_bitcask_backend.erl @@ -124,12 +124,17 @@ start(Partition, Config) -> %% @doc Stop the bitcask backend -spec stop(state()) -> ok. stop(#state{ref=Ref}) -> - bitcask:close(Ref). + case Ref of + undefined -> + ok; + _ -> + bitcask:close(Ref) + end. %% @doc Retrieve an object from the bitcask backend -spec get(riak_object:bucket(), riak_object:key(), state()) -> {ok, any(), state()} | - {ok, not_found, state()} | + {error, not_found, state()} | {error, term(), state()}. get(Bucket, Key, #state{ref=Ref}=State) -> BitcaskKey = term_to_binary({Bucket, Key}), @@ -304,8 +309,7 @@ fold_objects(FoldObjectsFun, Acc, Opts, #state{opts=BitcaskOpts, -spec drop(state()) -> {ok, state()} | {error, term(), state()}. drop(#state{ref=Ref, partition=Partition, - root=DataRoot, - opts=BitcaskOpts}=State) -> + root=DataRoot}=State) -> %% Close the bitcask reference bitcask:close(Ref), @@ -321,44 +325,27 @@ drop(#state{ref=Ref, CleanupDir = check_for_cleanup_dir(DataRoot, auto), move_unused_dirs(CleanupDir, PartitionDirs), + %% Spawn a process to cleanup the old data files. + %% The use of spawn is intentional. We do not + %% care if this process dies since any lingering + %% files will be cleaned up on the next drop. + %% The worst case is that the files hang + %% around and take up some disk space. + spawn(drop_data_cleanup(PartitionStr, CleanupDir)), + %% Make sure the data directory is now empty data_directory_cleanup(PartitionDir), - - case make_data_dir(filename:join([DataRoot, - PartitionStr])) of - {ok, DataDir} -> - %% Spawn a process to cleanup the old data files. - %% The use of spawn is intentional. We do not - %% care if this process dies since any lingering - %% files will be cleaned up on the next drop. - %% The worst case is that the files hang - %% around and take up some disk space. - spawn(drop_data_cleanup(PartitionStr, CleanupDir)), - - %% Now open the bitcask and return an updated state - %% so this backend can continue processing. - case bitcask:open(filename:join(DataRoot, DataDir), BitcaskOpts) of - Ref1 when is_reference(Ref1) -> - {ok, State#state{data_dir=DataDir, - ref=Ref1}}; - {error, Reason} -> - {error, Reason, State#state{data_dir=DataDir}} - end; - {error, Reason1} -> - {error, Reason1, State} - end. + {ok, State#state{ref = undefined}}. %% @doc Returns true if this bitcasks backend contains any %% non-tombstone values; otherwise returns false. -spec is_empty(state()) -> boolean(). is_empty(#state{ref=Ref}) -> - %% Determining if a bitcask is empty requires us to find at least - %% one value that is NOT a tombstone. Accomplish this by doing a fold_keys - %% that forcibly bails on the very first key encountered. - F = fun(_K, _Acc0) -> - throw(found_one_value) - end, - (catch bitcask:fold_keys(Ref, F, undefined)) /= found_one_value. + %% Estimate if we are empty or not as determining for certain + %% requires a fold over the keyspace that may block. The estimate may + %% return false when this bitcask is actually empty, but it will never + %% return true when the bitcask has data. + bitcask:is_empty_estimate(Ref). %% @doc Get the status information for this bitcask backend -spec status(state()) -> [{atom(), term()}]. 
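
Taken together, drop/1 now closes the bitcask and leaves ref = undefined instead of reopening a fresh one, which is why stop/1 above must tolerate an undefined reference. Roughly, under invented Partition/Config arguments:

bitcask_drop_then_stop(Partition, Config) ->
    {ok, S0} = riak_kv_bitcask_backend:start(Partition, Config),
    {ok, S1} = riak_kv_bitcask_backend:drop(S0),  %% bitcask closed, ref = undefined
    ok = riak_kv_bitcask_backend:stop(S1).        %% no-op rather than closing a stale ref
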
@@ -379,11 +366,12 @@ callback(Ref, merge_check, #state{ref=Ref, data_dir=DataDir, + opts=BitcaskOpts, root=DataRoot}=State) when is_reference(Ref) -> case bitcask:needs_merge(Ref) of {true, Files} -> BitcaskRoot = filename:join(DataRoot, DataDir), - bitcask_merge_worker:merge(BitcaskRoot, [], Files); + bitcask_merge_worker:merge(BitcaskRoot, BitcaskOpts, Files); false -> ok end, @@ -736,7 +724,7 @@ drop_test() -> %% Stop the backend ok = stop(State1), os:cmd("rm -rf test/bitcask-backend/*"), - ?assertEqual(["42", "auto_cleanup"], lists:sort(DataDirs)), + ?assertEqual(["auto_cleanup"], lists:sort(DataDirs)), %% The drop cleanup happens in a separate process so %% there is no guarantee it has happened yet when %% this test runs. diff --git a/src/riak_kv_bucket.erl b/src/riak_kv_bucket.erl index 63a1794535..e83945979c 100644 --- a/src/riak_kv_bucket.erl +++ b/src/riak_kv_bucket.erl @@ -44,7 +44,8 @@ validate([], ValidProps, Errors) -> validate([{BoolProp, MaybeBool}|T], ValidProps, Errors) when is_atom(BoolProp), BoolProp =:= allow_mult orelse BoolProp =:= basic_quorum orelse BoolProp =:= last_write_wins - orelse BoolProp =:= notfound_ok -> + orelse BoolProp =:= notfound_ok + orelse BoolProp =:= stat_tracked -> case coerce_bool(MaybeBool) of error -> validate(T, ValidProps, [{BoolProp, not_boolean}|Errors]); diff --git a/src/riak_kv_buckets_fsm.erl b/src/riak_kv_buckets_fsm.erl index 056e1ce52c..df835360a8 100644 --- a/src/riak_kv_buckets_fsm.erl +++ b/src/riak_kv_buckets_fsm.erl @@ -36,59 +36,50 @@ -type req_id() :: non_neg_integer(). -record(state, {buckets=sets:new() :: [term()], - client_type :: plain | mapred, from :: from()}). +-include("riak_kv_dtrace.hrl"). + %% @doc Return a tuple containing the ModFun to call per vnode, %% the number of primary preflist vnodes the operation %% should cover, the service to use to check for available nodes, %% and the registered name to use to access the vnode master process. -init(From={_, _, ClientPid}, [ItemFilter, Timeout, ClientType]) -> - case ClientType of - %% Link to the mapred job so we die if the job dies - mapred -> - link(ClientPid); - _ -> - ok - end, +init(From={_, _, ClientPid}, [ItemFilter, Timeout]) -> + ClientNode = atom_to_list(node(ClientPid)), + PidStr = pid_to_list(ClientPid), + FilterX = if ItemFilter == none -> 0; + true -> 1 + end, + %% "other" is a legacy term from when MapReduce used this FSM (in + %% which case, the string "mapred" would appear + ?DTRACE(?C_BUCKETS_INIT, [2, FilterX], + [<<"other">>, ClientNode, PidStr]), %% Construct the bucket listing request Req = ?KV_LISTBUCKETS_REQ{item_filter=ItemFilter}, {Req, allup, 1, 1, riak_kv, riak_kv_vnode_master, Timeout, - #state{client_type=ClientType, from=From}}. + #state{from=From}}. process_results(done, StateData) -> {done, StateData}; process_results(Buckets, StateData=#state{buckets=BucketAcc}) -> + ?DTRACE(?C_BUCKETS_PROCESS_RESULTS, [length(Buckets)], []), {ok, StateData#state{buckets=sets:union(sets:from_list(Buckets), BucketAcc)}}; process_results({error, Reason}, _State) -> + ?DTRACE(?C_BUCKETS_PROCESS_RESULTS, [-1], []), {error, Reason}. finish({error, Error}, - StateData=#state{client_type=ClientType, - from={raw, ReqId, ClientPid}}) -> - case ClientType of - mapred -> - %% An error occurred or the timeout interval elapsed - %% so all we can do now is die so that the rest of the - %% MapReduce processes will also die and be cleaned up. - exit(Error); - plain -> - %% Notify the requesting client that an error - %% occurred or the timeout has elapsed. 
- ClientPid ! {ReqId, Error} - end, + StateData=#state{from={raw, ReqId, ClientPid}}) -> + ?DTRACE(?C_BUCKETS_FINISH, [-1], []), + %% Notify the requesting client that an error + %% occurred or the timeout has elapsed. + ClientPid ! {ReqId, Error}, {stop, normal, StateData}; finish(clean, StateData=#state{buckets=Buckets, - client_type=ClientType, from={raw, ReqId, ClientPid}}) -> - case ClientType of - mapred -> - luke_flow:add_inputs(Buckets), - luke_flow:finish_inputs(ClientPid); - plain -> - ClientPid ! {ReqId, {buckets, sets:to_list(Buckets)}} - end, + ClientPid ! {ReqId, {buckets, sets:to_list(Buckets)}}, + ?DTRACE(?C_BUCKETS_FINISH, [0], []), {stop, normal, StateData}. diff --git a/src/riak_kv_console.erl b/src/riak_kv_console.erl index 9de459d1c1..852ed4420b 100644 --- a/src/riak_kv_console.erl +++ b/src/riak_kv_console.erl @@ -25,6 +25,7 @@ -module(riak_kv_console). -export([join/1, + staged_join/1, leave/1, remove/1, status/1, @@ -36,11 +37,24 @@ down/1, reload_code/1]). +%% Arrow is 24 chars wide +-define(ARROW, "=======================>"). + + join([NodeStr]) -> + join(NodeStr, fun riak_core:join/1, + "Sent join request to ~s~n", [NodeStr]). + +staged_join([NodeStr]) -> + Node = list_to_atom(NodeStr), + join(NodeStr, fun riak_core:staged_join/1, + "Success: staged join request for ~p to ~p~n", [node(), Node]). + +join(NodeStr, JoinFn, SuccessFmt, SuccessArgs) -> try - case riak_core:join(NodeStr) of + case JoinFn(NodeStr) of ok -> - io:format("Sent join request to ~s~n", [NodeStr]), + io:format(SuccessFmt, SuccessArgs), ok; {error, not_reachable} -> io:format("Node ~s is not reachable!~n", [NodeStr]), @@ -56,6 +70,10 @@ join([NodeStr]) -> io:format("Failed: This node is already a member of a " "cluster~n"), error; + {error, self_join} -> + io:format("Failed: This node cannot join itself in a " + "cluster~n"), + error; {error, _} -> io:format("Join failed. Try again in a few moments.~n", []), error @@ -259,8 +277,46 @@ transfers([]) -> Reason]), io:format("Transfers failed, see log for details~n"), error - end. + end, + %% Now display active transfers + {Xfers, Down} = riak_core_status:all_active_transfers(), + + DisplayXfer = + fun({{Mod, Partition}, Node, outbound, active, _Status}) -> + print_v1_status(Mod, Partition, Node); + + ({status_v2, Status}) -> + %% Display base status + Type = proplists:get_value(type, Status), + Mod = proplists:get_value(mod, Status), + SrcPartition = proplists:get_value(src_partition, Status), + TargetPartition = proplists:get_value(target_partition, Status), + StartTS = proplists:get_value(start_ts, Status), + SrcNode = proplists:get_value(src_node, Status), + TargetNode = proplists:get_value(target_node, Status), + + print_v2_status(Type, Mod, {SrcPartition, TargetPartition}, StartTS), + + %% Get info about stats if there is any yet + Stats = proplists:get_value(stats, Status), + + print_stats(SrcNode, TargetNode, Stats), + io:format("~n"); + + (_) -> + ignore + end, + DisplayDown = + fun(Node) -> + io:format("Node ~p could not be contacted~n", [Node]) + end, + + io:format("~nActive Transfers:~n~n", []), + [DisplayXfer(Xfer) || Xfer <- lists:flatten(Xfers)], + + io:format("~n"), + [DisplayDown(Node) || Node <- Down]. cluster_info([OutFile|Rest]) -> try @@ -309,10 +365,18 @@ reload_file(Filename) -> io:format("Module ~w not yet loaded, skipped.~n", [Mod]) end. 
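
The status_v2 entries consumed by the DisplayXfer fun above are proplists; an illustrative entry follows, with the field names taken from the proplists lookups and every value invented (print_stats/3 below also accepts the no_stats placeholder shown here):

{status_v2, [{type, hinted_handoff},   %% 'repair' gets the two-partition display
             {mod, riak_kv_vnode},
             {src_partition, 0},
             {target_partition, 365375409332725729550921208179070754913983135744},
             {src_node, 'riak@host1'},
             {target_node, 'riak@host2'},
             {start_ts, os:timestamp()},
             {stats, no_stats}]}.
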
+%%%=================================================================== +%%% Private +%%%=================================================================== + +datetime_str({_Mega, _Secs, _Micro}=Now) -> + datetime_str(calendar:now_to_datetime(Now)); +datetime_str({{Year, Month, Day}, {Hour, Min, Sec}}) -> + riak_core_format:fmt("~4..0B-~2..0B-~2..0B ~2..0B:~2..0B:~2..0B", + [Year,Month,Day,Hour,Min,Sec]). + format_stats([], Acc) -> lists:reverse(Acc); -format_stats([{vnode_gets, V}|T], Acc) -> - format_stats(T, [io_lib:format("vnode gets : ~p~n", [V])|Acc]); format_stats([{Stat, V}|T], Acc) -> format_stats(T, [io_lib:format("~p : ~p~n", [Stat, V])|Acc]). @@ -357,3 +421,54 @@ print_vnode_status([StatusItem | RestStatusItems]) -> io:format("Status: ~n~p~n", [StatusItem]) end, print_vnode_status(RestStatusItems). + +print_v2_status(Type, Mod, {SrcPartition, TargetPartition}, StartTS) -> + StartTSStr = datetime_str(StartTS), + Running = timer:now_diff(os:timestamp(), StartTS), + RunningStr = riak_core_format:human_time_fmt("~.2f", Running), + + io:format("transfer type: ~s~n", [Type]), + io:format("vnode type: ~p~n", [Mod]), + case Type of + repair -> + io:format("source partition: ~p~n", [SrcPartition]), + io:format("target partition: ~p~n", [TargetPartition]); + _ -> + io:format("partition: ~p~n", [TargetPartition]) + end, + io:format("started: ~s [~s ago]~n", [StartTSStr, RunningStr]). + +print_v1_status(Mod, Partition, Node) -> + io:format("vnode type: ~p~n", [Mod]), + io:format("partition: ~p~n", [Partition]), + io:format("target node: ~p~n~n", [Node]). + +print_stats(SrcNode, TargetNode, no_stats) -> + ToFrom = riak_core_format:fmt("~16s ~s ~16s", + [SrcNode, ?ARROW, TargetNode]), + Width = length(ToFrom), + + io:format("last update: no updates seen~n"), + io:format("objects transferred: unknown~n~n"), + io:format("~s~n", [string:centre("unknown", Width)]), + io:format("~s~n", [ToFrom]), + io:format("~s~n", [string:centre("unknown", Width)]); +print_stats(SrcNode, TargetNode, Stats) -> + ObjsS = proplists:get_value(objs_per_s, Stats), + BytesS = proplists:get_value(bytes_per_s, Stats), + LastUpdate = proplists:get_value(last_update, Stats), + Diff = timer:now_diff(os:timestamp(), LastUpdate), + DiffStr = riak_core_format:human_time_fmt("~.2f", Diff), + Objs = proplists:get_value(objs_total, Stats), + ObjsSStr = riak_core_format:fmt("~p Objs/s", [ObjsS]), + ByteStr = riak_core_format:human_size_fmt("~.2f", BytesS) ++ "/s", + TS = datetime_str(LastUpdate), + ToFrom = riak_core_format:fmt("~16s ~s ~16s", + [SrcNode, ?ARROW, TargetNode]), + Width = length(ToFrom), + + io:format("last update: ~s [~s ago]~n", [TS, DiffStr]), + io:format("objects transferred: ~p~n~n", [Objs]), + io:format("~s~n", [string:centre(ObjsSStr, Width)]), + io:format("~s~n", [ToFrom]), + io:format("~s~n", [string:centre(ByteStr, Width)]). diff --git a/src/riak_kv_delete.erl b/src/riak_kv_delete.erl index 037112343a..8d9a4cc96d 100644 --- a/src/riak_kv_delete.erl +++ b/src/riak_kv_delete.erl @@ -31,6 +31,8 @@ -export([start_link/6, start_link/7, start_link/8, delete/8]). +-include("riak_kv_dtrace.hrl"). + start_link(ReqId, Bucket, Key, Options, Timeout, Client) -> {ok, proc_lib:spawn_link(?MODULE, delete, [ReqId, Bucket, Key, Options, Timeout, Client, undefined, @@ -52,8 +54,11 @@ start_link(ReqId, Bucket, Key, Options, Timeout, Client, ClientId, VClock) -> %% @doc Delete the object at Bucket/Key. Direct return value is uninteresting, %% see riak_client:delete/3 for expected gen_server replies to Client. 
delete(ReqId,Bucket,Key,Options,Timeout,Client,ClientId,undefined) -> + riak_core_dtrace:put_tag(io_lib:format("~p,~p", [Bucket, Key])), + ?DTRACE(?C_DELETE_INIT1, [0], []), case get_r_options(Bucket, Options) of {error, Reason} -> + ?DTRACE(?C_DELETE_INIT1, [-1], []), Client ! {ReqId, {error, Reason}}; {R, PR} -> RealStartTime = riak_core_util:moment(), @@ -63,14 +68,19 @@ delete(ReqId,Bucket,Key,Options,Timeout,Client,ClientId,undefined) -> RemainingTime = Timeout - (riak_core_util:moment() - RealStartTime), delete(ReqId,Bucket,Key,Options,RemainingTime,Client,ClientId,riak_object:vclock(OrigObj)); {error, notfound} -> + ?DTRACE(?C_DELETE_INIT1, [-2], []), Client ! {ReqId, {error, notfound}}; X -> + ?DTRACE(?C_DELETE_INIT1, [-3], []), Client ! {ReqId, X} end end; delete(ReqId,Bucket,Key,Options,Timeout,Client,ClientId,VClock) -> + riak_core_dtrace:put_tag(io_lib:format("~p,~p", [Bucket, Key])), + ?DTRACE(?C_DELETE_INIT2, [0], []), case get_w_options(Bucket, Options) of {error, Reason} -> + ?DTRACE(?C_DELETE_INIT2, [-1], []), Client ! {ReqId, {error, Reason}}; {W, PW, DW} -> Obj0 = riak_object:new(Bucket, Key, <<>>, dict:store(?MD_DELETED, @@ -81,10 +91,15 @@ delete(ReqId,Bucket,Key,Options,Timeout,Client,ClientId,VClock) -> Client ! {ReqId, Reply}, case Reply of ok -> + ?DTRACE(?C_DELETE_INIT2, [1], [<<"reap">>]), {ok, C2} = riak:local_client(), AsyncTimeout = 60*1000, % Avoid client-specified value - C2:get(Bucket, Key, all, AsyncTimeout); - _ -> nop + Res = C2:get(Bucket, Key, all, AsyncTimeout), + ?DTRACE(?C_DELETE_REAPER_GET_DONE, [1], [<<"reap">>]), + Res; + _ -> + ?DTRACE(?C_DELETE_INIT2, [2], [<<"nop">>]), + nop end end. @@ -164,26 +179,16 @@ get_w_options(Bucket, Options) -> end. - - - - - - - - - %% =================================================================== %% EUnit tests %% =================================================================== -ifdef(TEST). delete_test_() -> - cleanup(ignored_arg), %% Execute the test cases - { foreach, - fun setup/0, - fun cleanup/1, + {foreach, + setup(), + cleanup(), [ fun invalid_r_delete/0, fun invalid_rw_delete/0, @@ -280,75 +285,18 @@ invalid_pw_delete() -> ?assert(false) end. -setup() -> - %% Shut logging up - too noisy. - application:load(sasl), - application:set_env(sasl, sasl_error_logger, {file, "riak_kv_delete_test_sasl.log"}), - error_logger:tty(false), - error_logger:logfile({open, "riak_kv_delete_test.log"}), - %% Start erlang node - TestNode = list_to_atom("testnode" ++ integer_to_list(element(3, now())) ++ - integer_to_list(element(2, now()))), - case net_kernel:start([TestNode, shortnames]) of - {ok, _} -> - ok; - {error, {already_started, _}} -> - ok - end, - do_dep_apps(start, dep_apps()), - application:set_env(riak_core, default_bucket_props, [{r, quorum}, - {w, quorum}, {pr, 0}, {pw, 0}, {rw, quorum}, {n_val, 3}]), - %% There's some weird interaction with the quickcheck tests in put_fsm_eqc - %% that somehow makes the riak_kv_delete sup not be running if those tests - %% run before these. I'm sick of trying to figure out what is not being - %% cleaned up right, thus the following workaround. - case whereis(riak_kv_delete_sup) of - undefined -> - {ok, _} = riak_kv_delete_sup:start_link(); - _ -> - ok - end, - riak_kv_get_fsm_sup:start_link(), - timer:sleep(500). 
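
Back in delete/8 above: once a vclock is known, the delete is performed by writing a tombstone, i.e. an empty value whose metadata carries ?MD_DELETED. A sketch of that construction; the "true" marker value and the set_vclock/2 stamping are assumptions about code elided from the hunk:

Obj0 = riak_object:new(Bucket, Key, <<>>,
                       dict:store(?MD_DELETED, "true", dict:new())),
Obj  = riak_object:set_vclock(Obj0, VClock).
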
- -cleanup(_Pid) -> - do_dep_apps(stop, lists:reverse(dep_apps())), - catch exit(whereis(riak_kv_vnode_master), kill), %% Leaks occasionally - catch exit(whereis(riak_sysmon_filter), kill), %% Leaks occasionally - catch unlink(whereis(riak_kv_get_fsm_sup)), - catch unlink(whereis(riak_kv_delete_sup)), - catch exit(whereis(riak_kv_get_fsm_sup), kill), %% Leaks occasionally - catch exit(whereis(riak_kv_delete_sup), kill), %% Leaks occasionally - net_kernel:stop(), - %% Reset the riak_core vnode_modules - application:unset_env(riak_core, default_bucket_props), - application:unset_env(sasl, sasl_error_logger), - error_logger:tty(true), - application:set_env(riak_core, vnode_modules, []). +configure(load) -> + application:set_env(riak_core, default_bucket_props, + [{r, quorum}, {w, quorum}, {pr, 0}, {pw, 0}, + {rw, quorum}, {n_val, 3}]), + application:set_env(riak_kv, storage_backend, riak_kv_memory_backend); +configure(_) -> ok. -dep_apps() -> - SetupFun = - fun(start) -> - %% Set some missing env vars that are normally - %% part of release packaging. - application:set_env(riak_core, ring_creation_size, 64), - application:set_env(riak_kv, storage_backend, riak_kv_memory_backend), - %% Create a fresh ring for the test - Ring = riak_core_ring:fresh(), - riak_core_ring_manager:set_ring_global(Ring), +setup() -> + riak_kv_test_util:common_setup(?MODULE, fun configure/1). - %% Start riak_kv - timer:sleep(500); - (stop) -> - ok - end, - XX = fun(_) -> error_logger:info_msg("Registered: ~w\n", [lists:sort(registered())]) end, - [sasl, crypto, riak_sysmon, webmachine, XX, riak_core, XX, luke, erlang_js, - inets, mochiweb, os_mon, SetupFun, riak_kv]. +cleanup() -> + riak_kv_test_util:common_cleanup(?MODULE, fun configure/1). -do_dep_apps(StartStop, Apps) -> - lists:map(fun(A) when is_atom(A) -> application:StartStop(A); - (F) -> F(StartStop) - end, Apps). -endif. diff --git a/src/riak_kv_eleveldb_backend.erl b/src/riak_kv_eleveldb_backend.erl index 088c64f7c5..a0e7695b52 100644 --- a/src/riak_kv_eleveldb_backend.erl +++ b/src/riak_kv_eleveldb_backend.erl @@ -54,6 +54,7 @@ -record(state, {ref :: reference(), data_root :: string(), + open_opts = [], config :: config(), read_opts = [], write_opts = [], @@ -93,22 +94,25 @@ start(Partition, Config) -> %% Get the data root directory DataDir = filename:join(app_helper:get_prop_or_env(data_root, Config, eleveldb), integer_to_list(Partition)), - case open_db(DataDir, Config) of - {ok, Ref} -> - {ok, #state { ref = Ref, - data_root = DataDir, - read_opts = config_value(read_options, Config, []), - write_opts = config_value(write_options, Config, []), - fold_opts = config_value(fold_options, Config, [{fill_cache, false}]), - config = Config }}; + + %% Initialize state + S0 = init_state(DataDir, Config), + case open_db(S0) of + {ok, State} -> + {ok, State}; {error, Reason} -> {error, Reason} end. %% @doc Stop the eleveldb backend -spec stop(state()) -> ok. -stop(_State) -> - %% No-op; GC handles cleanup +stop(State) -> + case State#state.ref of + undefined -> + ok; + _ -> + eleveldb:close(State#state.ref) + end, ok. %% @doc Retrieve an object from the eleveldb backend @@ -277,17 +281,13 @@ fold_objects(FoldObjectsFun, Acc, Opts, #state{fold_opts=FoldOpts, %% @doc Delete all objects from this eleveldb backend %% and return a fresh reference. -spec drop(state()) -> {ok, state()} | {error, term(), state()}. 
-drop(#state{data_root=DataRoot}=State) -> - case eleveldb:destroy(DataRoot, []) of +drop(State0) -> + eleveldb:close(State0#state.ref), + case eleveldb:destroy(State0#state.data_root, []) of ok -> - case open_db(DataRoot, State#state.config) of - {ok, Ref} -> - {ok, State#state { ref = Ref }}; - {error, Reason} -> - {error, Reason, State} - end; + {ok, State0#state{ref = undefined}}; {error, Reason} -> - {error, Reason, State} + {error, Reason, State0} end. %% @doc Returns true if this eleveldb backend contains any @@ -300,7 +300,8 @@ is_empty(#state{ref=Ref}) -> -spec status(state()) -> [{atom(), term()}]. status(State) -> {ok, Stats} = eleveldb:status(State#state.ref, <<"leveldb.stats">>), - [{stats, Stats}]. + {ok, ReadBlockError} = eleveldb:status(State#state.ref, <<"leveldb.ReadBlockError">>), + [{stats, Stats}, {read_block_error, ReadBlockError}]. %% @doc Register an asynchronous callback -spec callback(reference(), any(), state()) -> {ok, state()}. @@ -312,40 +313,97 @@ callback(_Ref, _Msg, State) -> %% =================================================================== %% @private -open_db(DataRoot, Config) -> +init_state(DataRoot, Config) -> %% Get the data root directory filelib:ensure_dir(filename:join(DataRoot, "dummy")), + %% Merge the proplist passed in from Config with any values specified by the + %% eleveldb app level; precedence is given to the Config. + MergedConfig = orddict:merge(fun(_K, VLocal, _VGlobal) -> VLocal end, + orddict:from_list(Config), % Local + orddict:from_list(application:get_all_env(eleveldb))), % Global + %% Use a variable write buffer size in order to reduce the number %% of vnodes that try to kick off compaction at the same time %% under heavy uniform load... - WriteBufferMin = config_value(write_buffer_size_min, Config, 3 * 1024 * 1024), - WriteBufferMax = config_value(write_buffer_size_max, Config, 6 * 1024 * 1024), + WriteBufferMin = config_value(write_buffer_size_min, MergedConfig, 30 * 1024 * 1024), + WriteBufferMax = config_value(write_buffer_size_max, MergedConfig, 60 * 1024 * 1024), WriteBufferSize = WriteBufferMin + random:uniform(1 + WriteBufferMax - WriteBufferMin), - %% Assemble options... - Options = [ - {create_if_missing, true}, - {write_buffer_size, WriteBufferSize}, - {max_open_files, config_value(max_open_files, Config)}, - {cache_size, config_value(cache_size, Config)}, - {paranoid_checks, config_value(paranoid_checks, Config)} - ], + %% Update the write buffer size in the merged config and make sure create_if_missing is set + %% to true + FinalConfig = orddict:store(write_buffer_size, WriteBufferSize, + orddict:store(create_if_missing, true, MergedConfig)), + + %% Parse out the open/read/write options + {OpenOpts, _BadOpenOpts} = eleveldb:validate_options(open, FinalConfig), + {ReadOpts, _BadReadOpts} = eleveldb:validate_options(read, FinalConfig), + {WriteOpts, _BadWriteOpts} = eleveldb:validate_options(write, FinalConfig), - lager:debug("Opening LevelDB in ~s with options: ~p\n", [DataRoot, Options]), - eleveldb:open(DataRoot, Options). + %% Use read options for folding, but FORCE fill_cache to false + FoldOpts = lists:keystore(fill_cache, 1, ReadOpts, {fill_cache, false}), + %% Warn if block_size is set + SSTBS = proplists:get_value(sst_block_size, OpenOpts, false), + BS = proplists:get_value(block_size, OpenOpts, false), + case BS /= false andalso SSTBS == false of + true -> + lager:warning("eleveldb block_size has been renamed sst_block_size " + "and the current setting of ~p is being ignored. 
" + "Changing sst_block_size is strongly cautioned " + "against unless you know what you are doing. Remove " + "block_size from app.config to get rid of this " + "message.\n", [BS]); + _ -> + ok + end, + + %% Generate a debug message with the options we'll use for each operation + lager:debug("Datadir ~s options for LevelDB: ~p\n", + [DataRoot, [{open, OpenOpts}, {read, ReadOpts}, {write, WriteOpts}, {fold, FoldOpts}]]), + #state { data_root = DataRoot, + open_opts = OpenOpts, + read_opts = ReadOpts, + write_opts = WriteOpts, + fold_opts = FoldOpts, + config = FinalConfig }. %% @private -config_value(Key, Config) -> - config_value(Key, Config, undefined). +open_db(State) -> + RetriesLeft = app_helper:get_env(riak_kv, eleveldb_open_retries, 30), + open_db(State, max(1, RetriesLeft), undefined). + +open_db(_State0, 0, LastError) -> + {error, LastError}; +open_db(State0, RetriesLeft, _) -> + case eleveldb:open(State0#state.data_root, State0#state.open_opts) of + {ok, Ref} -> + {ok, State0#state { ref = Ref }}; + %% Check specifically for lock error, this can be caused if + %% a crashed vnode takes some time to flush leveldb information + %% out to disk. The process is gone, but the NIF resource cleanup + %% may not have completed. + {error, {db_open, OpenErr}=Reason} -> + case lists:prefix("IO error: lock ", OpenErr) of + true -> + SleepFor = app_helper:get_env(riak_kv, eleveldb_open_retry_delay, 2000), + lager:debug("Leveldb backend retrying ~p in ~p ms after error ~s\n", + [State0#state.data_root, SleepFor, OpenErr]), + timer:sleep(SleepFor), + open_db(State0, RetriesLeft - 1, Reason); + false -> + {error, Reason} + end; + {error, Reason} -> + {error, Reason} + end. %% @private config_value(Key, Config, Default) -> - case proplists:get_value(Key, Config) of - undefined -> - app_helper:get_env(eleveldb, Key, Default); - Value -> + case orddict:find(Key, Config) of + error -> + Default; + {ok, Value} -> Value end. @@ -509,6 +567,100 @@ custom_config_test_() -> application:set_env(eleveldb, data_root, ""), riak_kv_backend:standard_test(?MODULE, [{data_root, "test/eleveldb-backend"}]). +retry_test() -> + Root = "/tmp/eleveldb_retry_test", + try + {ok, State1} = start(42, [{data_root, Root}]), + Me = self(), + Pid1 = spawn_link(fun() -> + receive + stop -> + Me ! {1, stop(State1)} + end + end), + _Pid2 = spawn_link( + fun() -> + Me ! {2, running}, + Me ! {2, start(42, [{data_root, Root}])} + end), + %% Ensure Pid2 is runnng and give it 10ms to get into the open + %% so we know it has a lock clash + receive + {2, running} -> + timer:sleep(10); + X -> + throw({unexpected, X}) + after + 5000 -> + throw(timeout1) + end, + %% Tell Pid1 to shut it down + Pid1 ! stop, + receive + {1, ok} -> + ok; + X2 -> + throw({unexpected, X2}) + after + 5000 -> + throw(timeout2) + end, + %% Wait for Pid2 + receive + {2, {ok, _State2}} -> + ok; + {2, Res} -> + throw({notok, Res}); + X3 -> + throw({unexpected, X3}) + end + after + os:cmd("rm -rf " ++ Root) + end. + +retry_fail_test() -> + Root = "/tmp/eleveldb_fail_retry_test", + try + application:set_env(riak_kv, eleveldb_open_retries, 3), % 3 times, 1ms a time + application:set_env(riak_kv, eleveldb_open_retry_delay, 1), + {ok, State1} = start(42, [{data_root, Root}]), + Me = self(), + spawn_link( + fun() -> + Me ! {2, running}, + Me ! 
{2, start(42, [{data_root, Root}])} + end), + %% Ensure Pid2 is runnng and give it 10ms to get into the open + %% so we know it has a lock clash + receive + {2, running} -> + ok; + X -> + throw({unexpected, X}) + after + 5000 -> + throw(timeout1) + end, + %% Wait for Pid2 to fail + receive + {2, {error, {db_open, _Why}}} -> + ok; + {2, Res} -> + throw({expect_fail, Res}); + X3 -> + throw({unexpected, X3}) + end, + %% Then close and reopen, just for kicks to prove it was the locking + ok = stop(State1), + {ok, State2} = start(42, [{data_root, Root}]), + ok = stop(State2) + after + os:cmd("rm -rf " ++ Root), + application:unset_env(riak_kv, eleveldb_open_retries), + application:unset_env(riak_kv, eleveldb_open_retry_delay) + end. + + -ifdef(EQC). eqc_test_() -> @@ -522,8 +674,7 @@ eqc_test_() -> [?_assertEqual(true, backend_eqc:test(?MODULE, false, [{data_root, - "test/eleveldb-backend"}, - {async_folds, false}]))]}, + "test/eleveldb-backend"}]))]}, {timeout, 60000, [?_assertEqual(true, backend_eqc:test(?MODULE, false, diff --git a/src/riak_kv_encoding_migrate.erl b/src/riak_kv_encoding_migrate.erl index 02bb084195..2fa179781c 100644 --- a/src/riak_kv_encoding_migrate.erl +++ b/src/riak_kv_encoding_migrate.erl @@ -62,7 +62,6 @@ %% Check if the cluster contains encoded values that need to be migrated check_cluster() -> {ok, RC} = riak:local_client(), - riak_kv_mapred_cache:clear(), {ok, Buckets} = RC:list_buckets(), case Buckets of [] -> @@ -70,16 +69,16 @@ check_cluster() -> {empty, [], []}; _ -> EObjs = get_encoded_keys(RC), - check_cluster2(RC, EObjs) + check_cluster2(EObjs) end. -check_cluster2(_, []) -> +check_cluster2([]) -> io:format("Cluster does not contain URL encoded values. " "No migration needed.~n", []), {not_needed, [], []}; -check_cluster2(RC, EObjs) -> - case {check_safe(RC, EObjs), check_double_encoding(EObjs)} of +check_cluster2(EObjs) -> + case {check_safe(EObjs), check_double_encoding(EObjs)} of {{safe, _}, {false, _}} -> io:format("Cluster contains URL encoded values. " "Migration needed.~n", []), @@ -112,7 +111,8 @@ get_encoded_keys() -> get_encoded_keys(RC) -> {ok, Buckets} = RC:list_buckets(), EObjs = [begin - {ok, Objs} = RC:mapred(Bucket, [reduce_check_encoded()]), + {ok, Objs} = riak_kv_mrc_pipe:mapred( + Bucket, [reduce_check_encoded()]), Objs end || Bucket <- Buckets], lists:flatten(EObjs). @@ -146,9 +146,9 @@ check_double_encoding(EObjs) -> end. %% Determine if it is safe to perform migration (no bucket/key conflicts). -check_safe(RC, EObjs) -> +check_safe(EObjs) -> EObjs2 = [decode_name(Name) || Name <- EObjs], - MR = RC:mapred(EObjs2, [map_unsafe()]), + MR = riak_kv_mrc_pipe:mapred(EObjs2, [map_unsafe()]), case MR of {ok, []} -> {safe, []}; @@ -167,9 +167,7 @@ map_unsafe(RO, _, _) -> %% Perform first phase of migration: copying encoded values to %% unencoded equivalents. migrate_objects(EObjs) -> - {ok, RC} = riak:local_client(), - riak_kv_mapred_cache:clear(), - MR = RC:mapred(EObjs, [map_rewrite_encoded()]), + MR = riak_kv_mrc_pipe:mapred(EObjs, [map_rewrite_encoded()]), case MR of {ok, []} -> io:format("All objects with URL encoded buckets/keys have been " diff --git a/src/riak_kv_fsm_timing.erl b/src/riak_kv_fsm_timing.erl new file mode 100644 index 0000000000..263edc0a6e --- /dev/null +++ b/src/riak_kv_fsm_timing.erl @@ -0,0 +1,81 @@ +%% ------------------------------------------------------------------- +%% +%% riak_kv_fsm_timing: Common code for timing fsm states +%% +%% Copyright (c) 2007-2010 Basho Technologies, Inc. All Rights Reserved. 
+%% +%% This file is provided to you under the Apache License, +%% Version 2.0 (the "License"); you may not use this file +%% except in compliance with the License. You may obtain +%% a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, +%% software distributed under the License is distributed on an +%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +%% KIND, either express or implied. See the License for the +%% specific language governing permissions and limitations +%% under the License. +%% +%% ------------------------------------------------------------------- + +%% @doc code that would otherwise be duplicated in both fsms +%% functions for gathering and calculating timing information +%% for fsm states. + +-module(riak_kv_fsm_timing). + +-export([add_timing/2, calc_timing/1]). + +-type timing() :: {StageName::atom(), StageStartTime::erlang:timestamp()}. +-type timings() :: [timing()]. +-type duration() :: {StageName::atom(), StageDuration::non_neg_integer()}. +-type durations() :: {ResponseUSecs::non_neg_integer(), [duration()]}. + +%% @doc add timing information of `{State, erlang:now()}' to the Timings + +-spec add_timing(atom(), timings()) -> timings(). +add_timing(State, Timings) when is_list(Timings) -> + [{State, os:timestamp()}|Timings]. + +%% --------------------------------------------------------------------- + +%% @doc Calc timing information - stored as `{Stage, StageStart}' +%% in reverse order. +%% +%% ResponseUsecs is calculated as time from reply to start of first stage. +%% If `reply' is in `stages' more than once, the earliest value is used. +%% If `reply' is not in `stages' fails with `badarg' +%% Since a stage's duration is the difference between it's start time +%% and the next stages start time, we don't calculate the duration of +%% the final stage, it is just there as the end time of the +%% penultimate stage + +-spec calc_timing(timings()) -> + durations(). +calc_timing(Stages0) -> + case proplists:get_value(reply, Stages0) of + undefined -> + erlang:error(badarg); + ReplyTime -> + [{_FinalStage, StageEnd}|Stages] = Stages0, + calc_timing(Stages, StageEnd, ReplyTime, orddict:new()) + end. + +%% A stages duration is the difference between it's start time +%% and the next stages start time. +-spec calc_timing(timings(), erlang:timestamp(), + erlang:timestamp(), + orddict:orddict()) -> + durations(). +calc_timing([], FirstStageStart, ReplyTime, Acc) -> + %% Time from first stage start until reply sent + {timer:now_diff(ReplyTime, FirstStageStart), orddict:to_list(Acc)}; +calc_timing([{Stage, StageStart} | Rest], StageEnd, ReplyTime, Acc0) -> + StageDuration = timer:now_diff(StageEnd, StageStart), + %% When the same stage appears more than once in + %% a list of timings() aggregate the times into + %% a total for that stage + Acc = orddict:update_counter(Stage, StageDuration, Acc0), + calc_timing(Rest, StageStart, ReplyTime, Acc). diff --git a/src/riak_kv_get_core.erl b/src/riak_kv_get_core.erl index 41503f8719..91929d5df1 100644 --- a/src/riak_kv_get_core.erl +++ b/src/riak_kv_get_core.erl @@ -20,7 +20,7 @@ %% %% ------------------------------------------------------------------- -module(riak_kv_get_core). --export([init/6, add_result/3, enough/1, response/1, +-export([init/6, add_result/3, result_shortcode/1, enough/1, response/1, has_all_results/1, final_action/1, info/1]). -export_type([getcore/0, result/0, reply/0, final_action/0]). 
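
Usage of the new riak_kv_fsm_timing module above, mirroring how the get FSM threads it through its states (the stage atoms follow the get FSM; the list accrues in reverse, and the final 'reply' entry only serves as the end time of the penultimate stage):

T0 = riak_kv_fsm_timing:add_timing(prepare, []),
T1 = riak_kv_fsm_timing:add_timing(validate, T0),
T2 = riak_kv_fsm_timing:add_timing(execute, T1),
T3 = riak_kv_fsm_timing:add_timing(reply, T2),
%% {TotalUSecsToReply, [{execute, _}, {prepare, _}, {validate, _}]}
{ResponseUSecs, StageDurations} = riak_kv_fsm_timing:calc_timing(T3).
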
@@ -30,8 +30,9 @@ -type reply() :: {ok, riak_object:riak_object()} | {error, notfound} | {error, any()}. +-type repair_reason() :: notfound | outofdate. -type final_action() :: nop | - {read_repair, [non_neg_integer()], riak_object:riak_object()} | + {read_repair, [{non_neg_integer() | repair_reason()}], riak_object:riak_object()} | delete. -type idxresult() :: {non_neg_integer(), result()}. @@ -86,7 +87,11 @@ add_result(Idx, Result, GetCore = #getcore{results = Results}) -> num_fail = GetCore#getcore.num_fail + 1} end. -%% Check if enough results have been added to respond +result_shortcode({ok, _RObj}) -> 1; +result_shortcode({error, notfound}) -> 0; +result_shortcode(_) -> -1. + +%% Check if enough results have been added to respond -spec enough(getcore()) -> boolean(). enough(#getcore{r = R, num_ok = NumOk, num_notfound = NumNotFound, @@ -122,24 +127,24 @@ response(GetCore = #getcore{r = R, num_ok = NumOk, num_notfound = NumNotFound, riak_kv_util:is_x_deleted(RObj)]), Fails = [F || F = {_Idx, {error, Reason}} <- Results, Reason /= notfound], - fail_reply(R, NumOk, NumOk - DelObjs, + fail_reply(R, NumOk, NumOk - DelObjs, NumNotFound + DelObjs, Fails) end, {Reply, GetCore#getcore{merged = Merged}}. %% Check if all expected results have been added -spec has_all_results(getcore()) -> boolean(). -has_all_results(#getcore{n = N, num_ok = NOk, +has_all_results(#getcore{n = N, num_ok = NOk, num_fail = NFail, num_notfound = NNF}) -> NOk + NFail + NNF >= N. %% Decide on any post-response actions %% nop - do nothing -%% {readrepair, Indices, MObj} - send read repairs iff any vnode has ancestor data +%% {readrepair, Indices, MObj} - send read repairs iff any vnode has ancestor data %% (including tombstones) %% delete - issue deletes if all vnodes returned tombstones. This needs to be %% supplemented with a check that the vnodes were all primaries. -%% +%% -spec final_action(getcore()) -> {final_action(), getcore()}. final_action(GetCore = #getcore{n = N, merged = Merged0, results = Results, allow_mult = AllowMult}) -> @@ -154,16 +159,16 @@ final_action(GetCore = #getcore{n = N, merged = Merged0, results = Results, notfound -> []; _ -> % ok or tombstone - [Idx || {Idx, {ok, RObj}} <- Results, + [{Idx, outofdate} || {Idx, {ok, RObj}} <- Results, strict_descendant(MObj, RObj)] ++ - [Idx || {Idx, {error, notfound}} <- Results] + [{Idx, notfound} || {Idx, {error, notfound}} <- Results] end, Action = case ReadRepairs of [] when ObjState == tombstone -> %% Allow delete if merge object is deleted, %% there are no read repairs pending and %% a value was received from all vnodes - case riak_kv_util:is_x_deleted(MObj) andalso + case riak_kv_util:is_x_deleted(MObj) andalso length([xx || {_Idx, {ok, _RObj}} <- Results]) == N of true -> delete; @@ -178,10 +183,10 @@ final_action(GetCore = #getcore{n = N, merged = Merged0, results = Results, {Action, GetCore#getcore{merged = Merged}}. %% Return request info --spec info(undefined | getcore()) -> [{vnode_oks, non_neg_integer()} | +-spec info(undefined | getcore()) -> [{vnode_oks, non_neg_integer()} | {vnode_errors, [any()]}]. 
- -info(undefined) -> + +info(undefined) -> []; % make uninitialized case easier info(#getcore{num_ok = NumOks, num_fail = NumFail, results = Results}) -> Oks = [{vnode_oks, NumOks}], @@ -199,9 +204,8 @@ info(#getcore{num_ok = NumOks, num_fail = NumFail, results = Results}) -> %% ==================================================================== strict_descendant(O1, O2) -> - dottedvv:descends(riak_object:vclock(O1),riak_object:vclock(O2)) andalso - not dottedvv:descends(riak_object:vclock(O2),riak_object:vclock(O1)). - + dottedvv:strict_descends(riak_object:vclock(O1),riak_object:vclock(O2)). + merge(Replies, AllowMult) -> RObjs = [RObj || {_I, {ok, RObj}} <- Replies], case RObjs of @@ -221,5 +225,3 @@ fail_reply(_R, _NumR, 0, NumNotFound, []) when NumNotFound > 0 -> {error, notfound}; fail_reply(R, NumR, _NumNotDeleted, _NumNotFound, _Fails) -> {error, {r_val_unsatisfied, R, NumR}}. - - diff --git a/src/riak_kv_get_fsm.erl b/src/riak_kv_get_fsm.erl index 64c865d153..87e01ce03e 100644 --- a/src/riak_kv_get_fsm.erl +++ b/src/riak_kv_get_fsm.erl @@ -38,7 +38,7 @@ -type option() :: {r, pos_integer()} | %% Minimum number of successful responses {pr, non_neg_integer()} | %% Minimum number of primary vnodes participating - {basic_quorum, boolean()} | %% Whether to use basic quorum (return early + {basic_quorum, boolean()} | %% Whether to use basic quorum (return early %% in some failure cases. {notfound_ok, boolean()} | %% Count notfound reponses as successful. {timeout, pos_integer() | infinity} | %% Timeout for vnode responses @@ -65,9 +65,15 @@ bkey :: {riak_object:bucket(), riak_object:key()}, bucket_props, startnow :: {non_neg_integer(), non_neg_integer(), non_neg_integer()}, - get_usecs :: non_neg_integer() + get_usecs :: non_neg_integer(), + tracked_bucket=false :: boolean(), %% is per bucket stats enabled for this bucket + timing = [] :: [{atom(), erlang:timestamp()}], + calculated_timings :: {ResponseUSecs::non_neg_integer(), + [{StateName::atom(), TimeUSecs::non_neg_integer()}]} | undefined }). +-include("riak_kv_dtrace.hrl"). + -define(DEFAULT_TIMEOUT, 60000). -define(DEFAULT_R, default). -define(DEFAULT_PR, 0). @@ -84,10 +90,10 @@ start_link(ReqId,Bucket,Key,R,Timeout,From) -> start_link({raw, ReqId, From}, Bucket, Key, [{r, R}, {timeout, Timeout}]). %% @doc Start the get FSM - retrieve Bucket/Key with the options provided -%% +%% %% {r, pos_integer()} - Minimum number of successful responses %% {pr, non_neg_integer()} - Minimum number of primary vnodes participating -%% {basic_quorum, boolean()} - Whether to use basic quorum (return early +%% {basic_quorum, boolean()} - Whether to use basic quorum (return early %% in some failure cases. %% {notfound_ok, boolean()} - Count notfound reponses as successful. %% {timeout, pos_integer() | infinity} - Timeout for vnode responses @@ -106,7 +112,7 @@ start_link(From, Bucket, Key, GetOptions) -> %% n - N-value for request (is grabbed from bucket props in prepare) %% bucket_props - bucket properties %% preflist2 - [{{Idx,Node},primary|fallback}] preference list -%% +%% test_link(ReqId,Bucket,Key,R,Timeout,From,StateProps) -> test_link({raw, ReqId, From}, Bucket, Key, [{r, R}, {timeout, Timeout}], StateProps). 
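
Note that the repair list built in riak_kv_get_core:final_action/1 above now pairs each index with the reason it needs repair; read_repair/3 in the FSM below looks indices up via proplists:get_value/2, and riak_kv_stat receives the annotated list wholesale. Its shape (values invented):

%% reasons are notfound | outofdate per the new repair_reason() type
ReadRepairs = [{12, outofdate}, {25, notfound}],
{read_repair, ReadRepairs, MergedObj}.   %% MergedObj: the merged riak_object
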
@@ -121,11 +127,13 @@ test_link(From, Bucket, Key, GetOptions, StateProps) -> %% @private init([From, Bucket, Key, Options]) -> - StartNow = now(), - StateData = #state{from = From, - options = Options, - bkey = {Bucket, Key}, - startnow = StartNow}, + StartNow = os:timestamp(), + StateData = add_timing(prepare, #state{from = From, + options = Options, + bkey = {Bucket, Key}, + startnow = StartNow}), + riak_core_dtrace:put_tag(io_lib:format("~p,~p", [Bucket, Key])), + ?DTRACE(?C_GET_FSM_INIT, [], ["init"]), {ok, prepare, StateData, 0}; init({test, Args, StateProps}) -> %% Call normal init @@ -146,20 +154,24 @@ init({test, Args, StateProps}) -> %% @private prepare(timeout, StateData=#state{bkey=BKey={Bucket,_Key}}) -> + ?DTRACE(?C_GET_FSM_PREPARE, [], ["prepare"]), {ok, Ring} = riak_core_ring_manager:get_my_ring(), BucketProps = riak_core_bucket:get_bucket(Bucket, Ring), DocIdx = riak_core_util:chash_key(BKey), N = proplists:get_value(n_val,BucketProps), + StatTracked = proplists:get_value(stat_tracked, BucketProps, false), UpNodes = riak_core_node_watcher:nodes(riak_kv), Preflist2 = riak_core_apl:get_apl_ann(DocIdx, N, Ring, UpNodes), - {next_state, validate, StateData#state{starttime=riak_core_util:moment(), + new_state_timeout(validate, StateData#state{starttime=riak_core_util:moment(), n = N, bucket_props=BucketProps, - preflist2 = Preflist2}, 0}. + preflist2 = Preflist2, + tracked_bucket = StatTracked}). %% @private validate(timeout, StateData=#state{from = {raw, ReqId, _Pid}, options = Options, n = N, bucket_props = BucketProps, preflist2 = PL2}) -> + ?DTRACE(?C_GET_FSM_VALIDATE, [], ["validate"]), Timeout = get_option(timeout, Options, ?DEFAULT_TIMEOUT), R0 = get_option(r, Options, ?DEFAULT_R), PR0 = get_option(pr, Options, ?DEFAULT_PR), @@ -167,28 +179,10 @@ validate(timeout, StateData=#state{from = {raw, ReqId, _Pid}, options = Options, PR = riak_kv_util:expand_rw_value(pr, PR0, BucketProps, N), NumVnodes = length(PL2), NumPrimaries = length([x || {_,primary} <- PL2]), - if - R =:= error -> - client_reply({error, {r_val_violation, R0}}, StateData), - {stop, normal, StateData}; - R > N -> - client_reply({error, {n_val_violation, N}}, StateData), - {stop, normal, StateData}; - PR =:= error -> - client_reply({error, {pr_val_violation, PR0}}, StateData), - {stop, normal, StateData}; - PR > N -> - client_reply({error, {n_val_violation, N}}, StateData), - {stop, normal, StateData}; - PR > NumPrimaries -> - client_reply({error, {pr_val_unsatisfied, PR, NumPrimaries}}, StateData), - {stop, normal, StateData}; - R > NumVnodes -> - client_reply({error, {insufficient_vnodes, NumVnodes, need, R}}, StateData), - {stop, normal, StateData}; - true -> + case validate_quorum(R, R0, N, PR, PR0, NumPrimaries, NumVnodes) of + ok -> BQ0 = get_option(basic_quorum, Options, default), - FailThreshold = + FailThreshold = case riak_kv_util:expand_value(basic_quorum, BQ0, BucketProps) of true -> erlang:min((N div 2)+1, % basic quorum, or @@ -200,49 +194,91 @@ validate(timeout, StateData=#state{from = {raw, ReqId, _Pid}, options = Options, NFOk0 = get_option(notfound_ok, Options, default), NotFoundOk = riak_kv_util:expand_value(notfound_ok, NFOk0, BucketProps), DeletedVClock = get_option(deletedvclock, Options, false), - GetCore = riak_kv_get_core:init(N, R, FailThreshold, + GetCore = riak_kv_get_core:init(N, R, FailThreshold, NotFoundOk, AllowMult, DeletedVClock), - {next_state, execute, StateData#state{get_core = GetCore, - timeout = Timeout, - req_id = ReqId}, 0} + new_state_timeout(execute, 
StateData#state{get_core = GetCore, + timeout = Timeout, + req_id = ReqId}); + Error -> + StateData2 = client_reply(Error, StateData), + {stop, normal, StateData2} end. +%% @private validate the quorum values +%% {error, Message} or ok +validate_quorum(R, ROpt, _N, _PR, _PROpt, _NumPrimaries, _NumVnodes) when R =:= error -> + {error, {r_val_violation, ROpt}}; +validate_quorum(R, _ROpt, N, _PR, _PROpt, _NumPrimaries, _NumVnodes) when R > N -> + {error, {n_val_violation, N}}; +validate_quorum(_R, _ROpt, _N, PR, PROpt, _NumPrimaries, _NumVnodes) when PR =:= error -> + {error, {pr_val_violation, PROpt}}; +validate_quorum(_R, _ROpt, N, PR, _PROpt, _NumPrimaries, _NumVnodes) when PR > N -> + {error, {n_val_violation, N}}; +validate_quorum(_R, _ROpt, _N, PR, _PROpt, NumPrimaries, _NumVnodes) when PR > NumPrimaries -> + {error, {pr_val_unsatisfied, PR, NumPrimaries}}; +validate_quorum(R, _ROpt, _N, _PR, _PROpt, _NumPrimaries, NumVnodes) when R > NumVnodes -> + {error, {insufficient_vnodes, NumVnodes, need, R}}; +validate_quorum(_R, _ROpt, _N, _PR, _PROpt, _NumPrimaries, _NumVnodes) -> + ok. + %% @private execute(timeout, StateData0=#state{timeout=Timeout,req_id=ReqId, - bkey=BKey, + bkey=BKey, preflist2 = Preflist2}) -> + ?DTRACE(?C_GET_FSM_EXECUTE, [], ["execute"]), TRef = schedule_timeout(Timeout), Preflist = [IndexNode || {IndexNode, _Type} <- Preflist2], + Ps = preflist_for_tracing(Preflist), + ?DTRACE(?C_GET_FSM_PREFLIST, [], Ps), riak_kv_vnode:get(Preflist, BKey, ReqId), StateData = StateData0#state{tref=TRef}, - {next_state,waiting_vnode_r,StateData}. + new_state(waiting_vnode_r, StateData). + +%% @private calculate a concatenated preflist for tracing macro +preflist_for_tracing(Preflist) -> + %% TODO: We can see entire preflist (more than 4 nodes) if we concatenate + %% all info into a single string. + [if is_atom(Nd) -> + [atom2list(Nd), $,, integer_to_list(Idx)]; + true -> + <<>> % eunit test + end || {Idx, Nd} <- lists:sublist(Preflist, 4)]. %% @private waiting_vnode_r({r, VnodeResult, Idx, _ReqId}, StateData = #state{get_core = GetCore}) -> + ShortCode = riak_kv_get_core:result_shortcode(VnodeResult), + IdxStr = integer_to_list(Idx), + ?DTRACE(?C_GET_FSM_WAITING_R, [ShortCode], ["waiting_vnode_r", IdxStr]), UpdGetCore = riak_kv_get_core:add_result(Idx, VnodeResult, GetCore), case riak_kv_get_core:enough(UpdGetCore) of true -> {Reply, UpdGetCore2} = riak_kv_get_core:response(UpdGetCore), - NewStateData2 = update_timing(StateData#state{get_core = UpdGetCore2}), - client_reply(Reply, NewStateData2), - update_stats(Reply, NewStateData2), - maybe_finalize(NewStateData2); + NewStateData = client_reply(Reply, StateData#state{get_core = UpdGetCore2}), + update_stats(Reply, NewStateData), + maybe_finalize(NewStateData); false -> - {next_state, waiting_vnode_r, StateData#state{get_core = UpdGetCore}} + %% don't use new_state/2 since we do timing per state, not per message in state + {next_state, waiting_vnode_r, StateData#state{get_core = UpdGetCore}} end; waiting_vnode_r(request_timeout, StateData) -> - S2 = update_timing(StateData), + ?DTRACE(?C_GET_FSM_WAITING_R_TIMEOUT, [-2], ["waiting_vnode_r", "timeout"]), + S2 = client_reply({error,timeout}, StateData), update_stats(timeout, S2), - client_reply({error,timeout}, S2), finalize(S2). 
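
The extracted validate_quorum/7 above makes the error precedence explicit. For instance, with arguments (R, ROpt, N, PR, PROpt, NumPrimaries, NumVnodes) and invented values:

ok                                  = validate_quorum(2, 2, 3, 0, 0, 3, 3),
{error, {r_val_violation, bogus}}   = validate_quorum(error, bogus, 3, 0, 0, 3, 3),
{error, {n_val_violation, 3}}       = validate_quorum(4, 4, 3, 0, 0, 3, 3),
{error, {pr_val_unsatisfied, 2, 1}} = validate_quorum(1, 1, 3, 2, 2, 1, 3).
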
%% @private waiting_read_repair({r, VnodeResult, Idx, _ReqId}, StateData = #state{get_core = GetCore}) -> + ShortCode = riak_kv_get_core:result_shortcode(VnodeResult), + IdxStr = integer_to_list(Idx), + ?DTRACE(?C_GET_FSM_WAITING_RR, [ShortCode], + ["waiting_read_repair", IdxStr]), UpdGetCore = riak_kv_get_core:add_result(Idx, VnodeResult, GetCore), maybe_finalize(StateData#state{get_core = UpdGetCore}); waiting_read_repair(request_timeout, StateData) -> + ?DTRACE(?C_GET_FSM_WAITING_RR_TIMEOUT, [-2], + ["waiting_read_repair", "timeout"]), finalize(StateData). %% @private @@ -267,11 +303,20 @@ terminate(Reason, _StateName, _State) -> %% @private code_change(_OldVsn, StateName, State, _Extra) -> {ok, StateName, State}. - + %% ==================================================================== %% Internal functions %% ==================================================================== +%% Move to the new state, marking the time it started +new_state(StateName, StateData) -> + {next_state, StateName, add_timing(StateName, StateData)}. + +%% Move to the new state, marking the time it started and trigger an immediate +%% timeout. +new_state_timeout(StateName, StateData) -> + {next_state, StateName, add_timing(StateName, StateData), 0}. + maybe_finalize(StateData=#state{get_core = GetCore}) -> case riak_kv_get_core:has_all_results(GetCore) of true -> finalize(StateData); @@ -287,6 +332,7 @@ finalize(StateData=#state{get_core = GetCore}) -> {read_repair, Indices, RepairObj} -> read_repair(Indices, RepairObj, UpdStateData); _Nop -> + ?DTRACE(?C_GET_FSM_FINALIZE, [], ["finalize"]), ok end, {stop,normal,StateData}. @@ -300,8 +346,12 @@ maybe_delete(_StateData=#state{n = N, preflist2=Sent, IdealNodes = [{I, Node} || {{I, Node}, primary} <- Sent], case length(IdealNodes) == N of true -> + ?DTRACE(?C_GET_FSM_MAYBE_DELETE, [1], + ["maybe_delete", "triggered"]), riak_kv_vnode:del(IdealNodes, BKey, ReqId); _ -> + ?DTRACE(?C_GET_FSM_MAYBE_DELETE, [0], + ["maybe_delete", "nop"]), nop end. @@ -309,12 +359,14 @@ maybe_delete(_StateData=#state{n = N, preflist2=Sent, read_repair(Indices, RepairObj, #state{req_id = ReqId, starttime = StartTime, preflist2 = Sent, bkey = BKey, bucket_props = BucketProps}) -> - RepairPreflist = [{Idx, Node} || {{Idx, Node}, _Type} <- Sent, - lists:member(Idx, Indices)], - riak_kv_vnode:readrepair(RepairPreflist, BKey, RepairObj, ReqId, + RepairPreflist = [{Idx, Node} || {{Idx, Node}, _Type} <- Sent, + proplists:get_value(Idx, Indices) /= undefined], + Ps = preflist_for_tracing(RepairPreflist), + ?DTRACE(?C_GET_FSM_RR, [], Ps), + riak_kv_vnode:readrepair(RepairPreflist, BKey, RepairObj, ReqId, StartTime, [{returnbody, false}, {bucket_props, BucketProps}]), - riak_kv_stat:update(read_repairs). + riak_kv_stat:update({read_repairs, Indices, Sent}). get_option(Name, Options, Default) -> @@ -325,7 +377,9 @@ schedule_timeout(infinity) -> schedule_timeout(Timeout) -> erlang:send_after(Timeout, self(), request_timeout). -client_reply(Reply, StateData = #state{from = {raw, ReqId, Pid}, options = Options}) -> +client_reply(Reply, StateData0 = #state{from = {raw, ReqId, Pid}, + options = Options}) -> + StateData = add_timing(reply, StateData0), Msg = case proplists:get_value(details, Options, false) of false -> {ReqId, Reply}; @@ -336,27 +390,33 @@ client_reply(Reply, StateData = #state{from = {raw, ReqId, Pid}, options = Optio Info = client_info(Details, StateData, []), {ReqId, {OkError, ObjReason, Info}} end, - Pid ! Msg. 
- -update_timing(StateData = #state{startnow = StartNow}) -> - EndNow = now(), - StateData#state{get_usecs = timer:now_diff(EndNow, StartNow)}. - -update_stats({ok, Obj}, #state{get_usecs = GetUsecs}) -> - %% Get the number of siblings and the object size. For object - %% size, get an approximation by adding together the bucket, key, - %% vectorclock, and all of the siblings. This is more complex than - %% calling term_to_binary/1, but it should be easier on memory, - %% especially for objects with large values. + Pid ! Msg, + ShortCode = riak_kv_get_core:result_shortcode(Reply), + %% calculate timings here, since the trace macro needs total response time + %% Stuff the result in state so we don't need to calculate it again + {ResponseUSecs, Stages} = riak_kv_fsm_timing:calc_timing(StateData#state.timing), + ?DTRACE(?C_GET_FSM_CLIENT_REPLY, [ShortCode, ResponseUSecs], ["client_reply"]), + StateData#state{calculated_timings={ResponseUSecs, Stages}}. + +update_stats({ok, Obj}, #state{tracked_bucket = StatTracked, calculated_timings={ResponseUSecs, Stages}}) -> + %% Stat the number of siblings and the object size, and timings NumSiblings = riak_object:value_count(Obj), + Bucket = riak_object:bucket(Obj), + ObjSize = calculate_objsize(Bucket, Obj), + riak_kv_stat:update({get_fsm, Bucket, ResponseUSecs, Stages, NumSiblings, ObjSize, StatTracked}); +update_stats(_, #state{ bkey = {Bucket, _}, tracked_bucket = StatTracked, calculated_timings={ResponseUSecs, Stages}}) -> + riak_kv_stat:update({get_fsm, Bucket, ResponseUSecs, Stages, undefined, undefined, StatTracked}). + +%% Get an approximation of object size by adding together the bucket, key, +%% vectorclock, and all of the siblings. This is more complex than +%% calling term_to_binary/1, but it should be easier on memory, +%% especially for objects with large values. +calculate_objsize(Bucket, Obj) -> Contents = riak_object:get_contents(Obj), - ObjSize = - size(riak_object:bucket(Obj)) + + size(Bucket) + size(riak_object:key(Obj)) + - lists:sum([size(term_to_binary(MD)) + value_size(Value) + size(term_to_binary(Clock)) || {MD, Value, Clock} <- Contents]), - riak_kv_stat:update({get_fsm, undefined, GetUsecs, NumSiblings, ObjSize}); -update_stats(_, #state{get_usecs = GetUsecs}) -> - riak_kv_stat:update({get_fsm, undefined, GetUsecs, undefined, undefined}). + size(term_to_binary(riak_object:vclock(Obj))) + + lists:sum([size(term_to_binary(MD)) + value_size(Value) || {MD, Value} <- Contents]). value_size(Value) when is_binary(Value) -> size(Value); value_size(Value) -> size(term_to_binary(Value)). @@ -365,21 +425,31 @@ client_info(true, StateData, Acc) -> client_info(details(), StateData, Acc); client_info([], _StateData, Acc) -> Acc; -client_info([timing | Rest], StateData = #state{get_usecs = GetUsecs}, Acc) -> - client_info(Rest, StateData, [{duration, GetUsecs} | Acc]); +client_info([timing | Rest], StateData = #state{timing=Timing}, Acc) -> + {ResponseUsecs, Stages} = riak_kv_fsm_timing:calc_timing(Timing), + client_info(Rest, StateData, [{response_usecs, ResponseUsecs}, + {stages, Stages} | Acc]); client_info([vnodes | Rest], StateData = #state{get_core = GetCore}, Acc) -> Info = riak_kv_get_core:info(GetCore), client_info(Rest, StateData, Info ++ Acc); client_info([Unknown | Rest], StateData, Acc) -> client_info(Rest, StateData, [{Unknown, unknown_detail} | Acc]). +%% Add timing information to the state +add_timing(Stage, State = #state{timing = Timing}) -> + State#state{timing = riak_kv_fsm_timing:add_timing(Stage, Timing)}. 
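client_reply/2 now stamps a final reply stage and derives both the total response time and a per-stage breakdown from the accumulated timing list through riak_kv_fsm_timing:calc_timing/1. That module's internals are not part of this patch, so the following is only a sketch of the shape such helpers could take, assuming timings are kept newest-first as {Stage, Timestamp} pairs:

    %% Sketch only: names and representation are assumptions, not the
    %% actual riak_kv_fsm_timing implementation.
    add_timing(Stage, Timings) when is_list(Timings) ->
        [{Stage, os:timestamp()} | Timings].

    %% Returns {TotalUSecs, [{Stage, USecsSpentInStage}]}, oldest stage first.
    calc_timing([{_Reply, ReplyNow} | _] = Timings) ->
        [{_First, FirstNow} | _] = Oldest = lists:reverse(Timings),
        {timer:now_diff(ReplyNow, FirstNow), durations(Oldest, [])}.

    durations([{Stage, T1}, {_, T2} = Next | Rest], Acc) ->
        durations([Next | Rest], [{Stage, timer:now_diff(T2, T1)} | Acc]);
    durations([{Stage, _}], Acc) ->
        lists:reverse(Acc, [{Stage, 0}]).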
details() -> [timing, vnodes]. +atom2list(A) when is_atom(A) -> + atom_to_list(A); +atom2list(P) when is_pid(P)-> + pid_to_list(P). % eunit tests + -ifdef(TEST). --define(expect_msg(Exp,Timeout), +-define(expect_msg(Exp,Timeout), ?assertEqual(Exp, receive Exp -> Exp after Timeout -> timeout end)). %% SLF: Comment these test cases because of OTP app dependency @@ -414,7 +484,7 @@ setup() -> riak_core_tracer:reset(), riak_core_tracer:filter([{riak_kv_vnode, readrepair}], fun({trace, _Pid, call, - {riak_kv_vnode, readrepair, + {riak_kv_vnode, readrepair, [Preflist, _BKey, Obj, ReqId, _StartTime, _Options]}}) -> [{rr, Preflist, Obj, ReqId}] end), @@ -425,10 +495,10 @@ cleanup(_) -> happy_path_case() -> riak_core_tracer:collect(5000), - + %% Start 3 vnodes Indices = [1, 2, 3], - Preflist2 = [begin + Preflist2 = [begin {ok, Pid} = riak_kv_vnode:test_vnode(Idx), {{Idx, Pid}, primary} end || Idx <- Indices], @@ -451,7 +521,7 @@ happy_path_case() -> {bucket_props, BucketProps}, {preflist2, Preflist2}]), ?assertEqual({error, notfound}, wait_for_reqid(ReqId1, Timeout + 1000)), - + %% Update the first two vnodes with a value ReqId2 = 49906465, Value = <<"value">>, @@ -462,7 +532,7 @@ happy_path_case() -> ?expect_msg({ReqId2, {w, 2, ReqId2}}, Timeout + 1000), ?expect_msg({ReqId2, {dw, 1, ReqId2}}, Timeout + 1000), ?expect_msg({ReqId2, {dw, 2, ReqId2}}, Timeout + 1000), - + %% Issue a get, check value returned. ReqId3 = 30031523, {ok, _FsmPid2} = test_link(ReqId3, Bucket, Key, R, Timeout, self(), @@ -490,7 +560,7 @@ n_val_violation_case() -> BucketProps = bucket_props(Bucket, Nval), %% Fake three nodes Indices = [1, 2, 3], - Preflist2 = [begin + Preflist2 = [begin {{Idx, self()}, primary} end || Idx <- Indices], {ok, _FsmPid1} = test_link(ReqId1, Bucket, Key, R, Timeout, self(), @@ -499,8 +569,8 @@ n_val_violation_case() -> {bucket_props, BucketProps}, {preflist2, Preflist2}]), ?assertEqual({error, {n_val_violation, 3}}, wait_for_reqid(ReqId1, Timeout + 1000)). - - + + wait_for_reqid(ReqId, Timeout) -> receive {ReqId, Msg} -> Msg @@ -525,7 +595,7 @@ bucket_props(Bucket, Nval) -> % riak_core_bucket:get_bucket(Bucket). {small_vclock,50}, {w,quorum}, {young_vclock,20}]. - + -endif. % BROKEN_EUNIT_PURITY_VIOLATION -endif. diff --git a/src/riak_kv_index_fsm.erl b/src/riak_kv_index_fsm.erl index 6ec7691c89..b370ee0caa 100644 --- a/src/riak_kv_index_fsm.erl +++ b/src/riak_kv_index_fsm.erl @@ -42,80 +42,77 @@ -export([init/2, process_results/2, finish/2]). +-export([use_ack_backpressure/0, + req/3]). -type from() :: {atom(), req_id(), pid()}. -type req_id() :: non_neg_integer(). --record(state, {client_type :: plain | mapred, - from :: from()}). +-record(state, {from :: from()}). + +%% @doc Returns `true' if the new ack-based backpressure index +%% protocol should be used. This decision is based on the +%% `index_backpressure' setting in `riak_kv''s application +%% environment. +-spec use_ack_backpressure() -> boolean(). +use_ack_backpressure() -> + riak_core_capability:get({riak_kv, index_backpressure}, false) == true. + +%% @doc Construct the correct index command record. +-spec req(binary(), term(), term()) -> term(). +req(Bucket, ItemFilter, Query) -> + case use_ack_backpressure() of + true -> + ?KV_INDEX_REQ{bucket=Bucket, + item_filter=ItemFilter, + qry=Query}; + false -> + #riak_kv_index_req_v1{bucket=Bucket, + item_filter=ItemFilter, + qry=Query} + end. 
%% @doc Return a tuple containing the ModFun to call per vnode, %% the number of primary preflist vnodes the operation %% should cover, the service to use to check for available nodes, %% and the registered name to use to access the vnode master process. -init(From={_, _, ClientPid}, [Bucket, ItemFilter, Query, Timeout, ClientType]) -> - case ClientType of - %% Link to the mapred job so we die if the job dies - mapred -> - link(ClientPid); - _ -> - ok - end, +init(From={_, _, _}, [Bucket, ItemFilter, Query, Timeout]) -> %% Get the bucket n_val for use in creating a coverage plan BucketProps = riak_core_bucket:get_bucket(Bucket), NVal = proplists:get_value(n_val, BucketProps), %% Construct the key listing request - Req = ?KV_INDEX_REQ{bucket=Bucket, - item_filter=ItemFilter, - qry=Query}, + Req = req(Bucket, ItemFilter, Query), {Req, all, NVal, 1, riak_kv, riak_kv_vnode_master, Timeout, - #state{client_type=ClientType, from=From}}. + #state{from=From}}. process_results({error, Reason}, _State) -> {error, Reason}; +process_results({From, Bucket, Results}, + StateData=#state{from={raw, ReqId, ClientPid}}) -> + process_query_results(Bucket, Results, ReqId, ClientPid), + riak_kv_vnode:ack_keys(From), % tell that vnode we're ready for more + {ok, StateData}; process_results({Bucket, Results}, - StateData=#state{client_type=ClientType, - from={raw, ReqId, ClientPid}}) -> - process_query_results(ClientType, Bucket, Results, ReqId, ClientPid), + StateData=#state{from={raw, ReqId, ClientPid}}) -> + process_query_results(Bucket, Results, ReqId, ClientPid), {ok, StateData}; process_results(done, StateData) -> {done, StateData}. finish({error, Error}, - StateData=#state{from={raw, ReqId, ClientPid}, - client_type=ClientType}) -> - case ClientType of - mapred -> - %% An error occurred or the timeout interval elapsed - %% so all we can do now is die so that the rest of the - %% MapReduce processes will also die and be cleaned up. - exit(Error); - plain -> - %% Notify the requesting client that an error - %% occurred or the timeout has elapsed. - ClientPid ! {ReqId, {error, Error}} - end, + StateData=#state{from={raw, ReqId, ClientPid}}) -> + %% Notify the requesting client that an error + %% occurred or the timeout has elapsed. + ClientPid ! {ReqId, {error, Error}}, {stop, normal, StateData}; finish(clean, - StateData=#state{from={raw, ReqId, ClientPid}, - client_type=ClientType}) -> - case ClientType of - mapred -> - luke_flow:finish_inputs(ClientPid); - plain -> - ClientPid ! {ReqId, done} - end, + StateData=#state{from={raw, ReqId, ClientPid}}) -> + ClientPid ! {ReqId, done}, {stop, normal, StateData}. %% =================================================================== %% Internal functions %% =================================================================== -process_query_results(plain, _Bucket, Results, ReqId, ClientPid) -> - ClientPid ! {ReqId, {results, Results}}; -process_query_results(mapred, Bucket, Results, _ReqId, ClientPid) -> - try - luke_flow:add_inputs(ClientPid, [{Bucket, Result} || Result <- Results]) - catch _:_ -> - exit(self(), normal) - end. +process_query_results(_Bucket, Results, ReqId, ClientPid) -> + ClientPid ! {ReqId, {results, Results}}. 
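The req/3 selection above is the standard mixed-cluster upgrade pattern: negotiate a capability across the ring, then construct the newest request record that every node can still decode. A minimal sketch of the same pattern (riak_core_capability:get/2 is the real negotiation call; the capability name and records here are invented):

    -record(my_req_v1, {bucket}).
    -record(my_req_v2, {bucket, ack = true}).  % v2 adds ack-based flow control

    make_req(Bucket) ->
        %% get/2 returns the negotiated, cluster-wide value, falling back
        %% to the default while any node still lacks the capability
        case riak_core_capability:get({riak_kv, my_feature}, false) of
            true  -> #my_req_v2{bucket = Bucket};
            false -> #my_req_v1{bucket = Bucket}
        end.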
diff --git a/src/riak_kv_js_manager.erl b/src/riak_kv_js_manager.erl index 521424f6b9..56d04cd46e 100644 --- a/src/riak_kv_js_manager.erl +++ b/src/riak_kv_js_manager.erl @@ -121,11 +121,6 @@ handle_call({mark_idle, VM}, _From, #state{master=Master, handle_call(reload_vms, _From, #state{master=Master, idle=Idle}=State) -> reload_idle_vms(Idle), mark_pending_reloads(Master, Idle), - if State#state.name == ?JSPOOL_MAP -> - riak_kv_mapred_cache:clear(); - true -> - ok - end, {reply, ok, State}; handle_call({reserve_batch_vm, Owner}, _From, State) -> diff --git a/src/riak_kv_keylister_legacy.erl b/src/riak_kv_keylister_legacy.erl deleted file mode 100644 index 8a93e2ce8e..0000000000 --- a/src/riak_kv_keylister_legacy.erl +++ /dev/null @@ -1,125 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% riak_kv_keylister_legacy: Manage streaming keys for a bucket from a -%% cluster node -%% -%% Copyright (c) 2007-2010 Basho Technologies, Inc. All Rights Reserved. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- - --module(riak_kv_keylister_legacy). - --behaviour(gen_fsm). - -%% API --export([start_link/3, - list_keys/2]). - -%% States --export([waiting/2]). - -%% gen_fsm callbacks --export([init/1, state_name/2, state_name/3, handle_event/3, - handle_sync_event/4, handle_info/3, terminate/3, code_change/4]). - --record(state, {reqid, - caller, - bucket, - filter, - bloom}). - -list_keys(ListerPid, VNode) -> - gen_fsm:send_event(ListerPid, {lk, VNode}). - -start_link(ReqId, Caller, Bucket) -> - gen_fsm:start_link(?MODULE, [ReqId, Caller, Bucket], []). - -init([ReqId, Caller, Inputs]) -> - erlang:monitor(process, Caller), - {ok, Bloom} = ebloom:new(10000000, 0.0001, crypto:rand_uniform(1, 5000)), - {Bucket, Filter} = build_filter(Inputs), - {ok, waiting, #state{reqid=ReqId, caller=Caller, bloom=Bloom, bucket=Bucket, - filter=Filter}}. - -waiting({lk, VNode}, #state{reqid=ReqId, bucket=Bucket}=State) -> - riak_kv_vnode:list_keys(VNode, ReqId, self(), Bucket), - {next_state, waiting, State}. - -state_name(_Event, State) -> - {next_state, waiting, State}. - -state_name(_Event, _From, State) -> - {reply, ignored, state_name, State}. - -handle_event(_Event, StateName, State) -> - {next_state, StateName, State}. - -handle_sync_event(_Event, _From, StateName, State) -> - {reply, ignored, StateName, State}. 
- -handle_info({ReqId, {kl, Idx, Keys0}}, waiting, #state{reqid=ReqId, bloom=Bloom, - filter=Filter, caller=Caller}=State) -> - F = fun(Key, Acc) -> - case ebloom:contains(Bloom, Key) of - true -> - Acc; - false -> - case is_function(Filter) of - true -> - case Filter(Key) of - true -> - ebloom:insert(Bloom, Key), - [Key|Acc]; - false -> - Acc - end; - false -> - [Key|Acc] - end end end, - case lists:foldl(F, [], Keys0) of - [] -> - ok; - Keys -> - gen_fsm:send_event(Caller, {ReqId, {kl, Idx, Keys}}) - end, - {next_state, waiting, State}; -handle_info({ReqId, Idx, done}, waiting, #state{reqid=ReqId, caller=Caller}=State) -> - gen_fsm:send_event(Caller, {ReqId, Idx, done}), - {next_state, waiting, State}; -handle_info({'DOWN', _MRef, _Type, Caller, _Info}, waiting, #state{caller=Caller}=State) -> - {stop, normal, State}; -handle_info(_Info, StateName, State) -> - {next_state, StateName, State}. - -terminate(_Reason, _StateName, #state{bloom=Bloom}) -> - ebloom:clear(Bloom), - ok. - -code_change(_OldVsn, StateName, State, _Extra) -> - {ok, StateName, State}. - -%% Internal functions -build_filter('_') -> - {'_', []}; -build_filter(Bucket) when is_binary(Bucket) -> - {Bucket, []}; -build_filter({filter, Bucket, Fun}) when is_function(Fun) -> - %% this is the representation used by riak_client:filter_keys - {Bucket, Fun}; -build_filter({Bucket, Filters}) -> - FilterFun = riak_kv_mapred_filters:compose(Filters), - {Bucket, FilterFun}. diff --git a/src/riak_kv_keylister_legacy_sup.erl b/src/riak_kv_keylister_legacy_sup.erl deleted file mode 100644 index b4ff48ec06..0000000000 --- a/src/riak_kv_keylister_legacy_sup.erl +++ /dev/null @@ -1,48 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% riak_kv_keylister_legacy_sup: Supervisor for starting legacy keylister processes -%% -%% Copyright (c) 2007-2010 Basho Technologies, Inc. All Rights Reserved. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- --module(riak_kv_keylister_legacy_sup). - --behaviour(supervisor). - -%% API --export([start_link/0, - new_lister/3]). - -%% Supervisor callbacks --export([init/1]). - -new_lister(ReqId, Bucket, Caller) -> - start_child([ReqId, Bucket, Caller]). - -start_link() -> - supervisor:start_link({local, ?MODULE}, ?MODULE, []). - -init([]) -> - SupFlags = {simple_one_for_one, 0, 1}, - Process = {undefined, - {riak_kv_keylister_legacy, start_link, []}, - temporary, brutal_kill, worker, dynamic}, - {ok, {SupFlags, [Process]}}. - -%% Internal functions -start_child(Args) -> - supervisor:start_child(?MODULE, Args). 
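For context on the listers being deleted here: overlapping preference lists meant the same key could arrive from several vnodes, so each lister deduplicated keys with an ebloom filter before forwarding them, as in the fold above. A condensed sketch of that dedup step (same ebloom calls as the removed code; the wrapper name is invented):

    dedupe_keys(Keys, Bloom) ->
        lists:foldl(fun(Key, Acc) ->
                            case ebloom:contains(Bloom, Key) of
                                true  -> Acc;                 % already emitted
                                false -> ebloom:insert(Bloom, Key),
                                         [Key | Acc]
                            end
                    end, [], Keys).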
diff --git a/src/riak_kv_keylister_master.erl b/src/riak_kv_keylister_master.erl deleted file mode 100644 index 4c30d4fcd4..0000000000 --- a/src/riak_kv_keylister_master.erl +++ /dev/null @@ -1,80 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% riak_kv_keylister_master: Starts legacy keylister processes on demand -%% -%% Copyright (c) 2007-2010 Basho Technologies, Inc. All Rights Reserved. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- --module(riak_kv_keylister_master). - --behaviour(gen_server). - -%% API --export([start_link/0, - start_keylist/3, - start_keylist/4]). - -%% gen_server callbacks --export([init/1, handle_call/3, handle_cast/2, handle_info/2, - terminate/2, code_change/3]). - --define(SERVER, ?MODULE). --define(DEFAULT_TIMEOUT, 5000). - --record(state, {}). - -start_keylist(Node, ReqId, Bucket) -> - start_keylist(Node, ReqId, Bucket, ?DEFAULT_TIMEOUT). - -start_keylist(Node, ReqId, Bucket, Timeout) -> - try - case gen_server:call({?SERVER, Node}, {start_kl, ReqId, self(), Bucket}, Timeout) of - {ok, Pid} -> - {ok, Pid}; - Error -> - Error - end - catch - exit:{timeout, _} -> - {error, timeout} - end. - - -start_link() -> - gen_server:start_link({local, ?SERVER}, ?MODULE, [], []). - -init([]) -> - {ok, #state{}}. - -handle_call({start_kl, ReqId, Caller, Bucket}, _From, State) -> - Reply = riak_kv_keylister_legacy_sup:new_lister(ReqId, Caller, Bucket), - {reply, Reply, State}; - -handle_call(_Request, _From, State) -> - {reply, ignore, State}. - -handle_cast(_Msg, State) -> - {noreply, State}. - -handle_info(_Info, State) -> - {noreply, State}. - -terminate(_Reason, _State) -> - ok. - -code_change(_OldVsn, State, _Extra) -> - {ok, State}. diff --git a/src/riak_kv_keys_fsm.erl b/src/riak_kv_keys_fsm.erl index 95608e1d3e..2491ab0fd4 100644 --- a/src/riak_kv_keys_fsm.erl +++ b/src/riak_kv_keys_fsm.erl @@ -49,8 +49,9 @@ -type from() :: {atom(), req_id(), pid()}. -type req_id() :: non_neg_integer(). --record(state, {client_type :: plain | mapred, - from :: from()}). +-record(state, {from :: from()}). + +-include("riak_kv_dtrace.hrl"). %% @doc Returns `true' if the new ack-based backpressure listkeys %% protocol should be used. This decision is based on the @@ -58,7 +59,7 @@ %% environment. -spec use_ack_backpressure() -> boolean(). use_ack_backpressure() -> - app_helper:get_env(riak_kv, listkeys_backpressure) == true. + riak_core_capability:get({riak_kv, listkeys_backpressure}, false) == true. %% @doc Construct the correct listkeys command record. -spec req(binary(), term()) -> term(). @@ -76,62 +77,54 @@ req(Bucket, ItemFilter) -> %% the number of primary preflist vnodes the operation %% should cover, the service to use to check for available nodes, %% and the registered name to use to access the vnode master process. 
-init(From={_, _, ClientPid}, [Bucket, ItemFilter, Timeout, ClientType]) -> - case ClientType of - %% Link to the mapred job so we die if the job dies - mapred -> - link(ClientPid); - _ -> - ok - end, +init(From={_, _, ClientPid}, [Bucket, ItemFilter, Timeout]) -> + riak_core_dtrace:put_tag(io_lib:format("~p", [Bucket])), + ClientNode = atom_to_list(node(ClientPid)), + PidStr = pid_to_list(ClientPid), + FilterX = if ItemFilter == none -> 0; + true -> 1 + end, + %% "other" is a legacy term from when MapReduce used this FSM (in + %% which case, the string "mapred" would appear + ?DTRACE(?C_KEYS_INIT, [2, FilterX], + [<<"other">>, ClientNode, PidStr]), %% Get the bucket n_val for use in creating a coverage plan BucketProps = riak_core_bucket:get_bucket(Bucket), NVal = proplists:get_value(n_val, BucketProps), %% Construct the key listing request Req = req(Bucket, ItemFilter), {Req, all, NVal, 1, riak_kv, riak_kv_vnode_master, Timeout, - #state{client_type=ClientType, from=From}}. + #state{from=From}}. process_results({From, Bucket, Keys}, - StateData=#state{client_type=ClientType, - from={raw, ReqId, ClientPid}}) -> - process_keys(ClientType, Bucket, Keys, ReqId, ClientPid), + StateData=#state{from={raw, ReqId, ClientPid}}) -> + %% TODO: have caller give us the Idx number. + ?DTRACE(?C_KEYS_PROCESS_RESULTS, [length(Keys)], []), + process_keys(Bucket, Keys, ReqId, ClientPid), riak_kv_vnode:ack_keys(From), % tell that vnode we're ready for more {ok, StateData}; process_results({Bucket, Keys}, - StateData=#state{client_type=ClientType, - from={raw, ReqId, ClientPid}}) -> - process_keys(ClientType, Bucket, Keys, ReqId, ClientPid), + StateData=#state{from={raw, ReqId, ClientPid}}) -> + ?DTRACE(?C_KEYS_PROCESS_RESULTS, [length(Keys)], []), + process_keys(Bucket, Keys, ReqId, ClientPid), {ok, StateData}; process_results(done, StateData) -> {done, StateData}; process_results({error, Reason}, _State) -> + ?DTRACE(?C_KEYS_PROCESS_RESULTS, [-1], []), {error, Reason}. finish({error, Error}, - StateData=#state{from={raw, ReqId, ClientPid}, - client_type=ClientType}) -> - case ClientType of - mapred -> - %% An error occurred or the timeout interval elapsed - %% so all we can do now is die so that the rest of the - %% MapReduce processes will also die and be cleaned up. - exit(Error); - plain -> - %% Notify the requesting client that an error - %% occurred or the timeout has elapsed. - ClientPid ! {ReqId, Error} - end, + StateData=#state{from={raw, ReqId, ClientPid}}) -> + ?DTRACE(?C_KEYS_FINISH, [-1], []), + %% Notify the requesting client that an error + %% occurred or the timeout has elapsed. + ClientPid ! {ReqId, Error}, {stop, normal, StateData}; finish(clean, - StateData=#state{from={raw, ReqId, ClientPid}, - client_type=ClientType}) -> - case ClientType of - mapred -> - luke_flow:finish_inputs(ClientPid); - plain -> - ClientPid ! {ReqId, done} - end, + StateData=#state{from={raw, ReqId, ClientPid}}) -> + ClientPid ! {ReqId, done}, + ?DTRACE(?C_KEYS_FINISH, [0], []), {stop, normal, StateData}. %% =================================================================== @@ -146,7 +139,7 @@ finish(clean, ack_keys({Pid, Ref}) -> Pid ! {Ref, ok}. -process_keys(plain, _Bucket, Keys, ReqId, ClientPid) -> +process_keys(_Bucket, Keys, ReqId, ClientPid) -> case use_ack_backpressure() of true -> Monitor = erlang:monitor(process, ClientPid), @@ -159,10 +152,4 @@ process_keys(plain, _Bucket, Keys, ReqId, ClientPid) -> end; false -> ClientPid ! 
{ReqId, {keys, Keys}} - end; -process_keys(mapred, Bucket, Keys, _ReqId, ClientPid) -> - try - luke_flow:add_inputs(ClientPid, [{Bucket, Key} || Key <- Keys]) - catch _:_ -> - exit(self(), normal) end. diff --git a/src/riak_kv_keys_fsm_legacy.erl b/src/riak_kv_keys_fsm_legacy.erl deleted file mode 100644 index f4894472c8..0000000000 --- a/src/riak_kv_keys_fsm_legacy.erl +++ /dev/null @@ -1,284 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% riak_keys_fsm_legacy: legacy listing of bucket keys -%% -%% Copyright (c) 2007-2010 Basho Technologies, Inc. All Rights Reserved. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- - -%% @doc legacy listing of bucket keys - --module(riak_kv_keys_fsm_legacy). --behaviour(gen_fsm). --include_lib("riak_kv_vnode.hrl"). --export([start_link/6]). --export([init/1, handle_event/3, handle_sync_event/4, - handle_info/3, terminate/3, code_change/4]). --export([initialize/2,waiting_kl/2]). - --record(state, {client :: pid(), - client_type :: atom(), - bloom :: term(), - pls :: [list()], - wait_pls :: [term()], - simul_pls :: integer(), - vns :: term(), - bucket :: riak_object:bucket(), - input, - timeout :: pos_integer(), - req_id :: pos_integer(), - ring :: riak_core_ring:riak_core_ring(), - listers :: [{atom(), pid()}] - }). - -start_link(ReqId,Bucket,Timeout,ClientType,ErrorTolerance,From) -> - gen_fsm:start_link(?MODULE, - [ReqId,Bucket,Timeout,ClientType,ErrorTolerance,From], []). - -%% @private -init([ReqId,Input,Timeout,ClientType,ErrorTolerance,Client]) -> - process_flag(trap_exit, true), - {ok, Ring} = riak_core_ring_manager:get_my_ring(), - {ok, Bloom} = ebloom:new(10000000,ErrorTolerance,ReqId), - Bucket = case Input of - {B, _} -> - B; - _ -> - Input - end, - StateData = #state{client=Client, client_type=ClientType, timeout=Timeout, - bloom=Bloom, req_id=ReqId, input=Input, bucket=Bucket, ring=Ring}, - case ClientType of - %% Link to the mapred job so we die if the job dies - mapred -> - link(Client); - _ -> - ok - end, - {ok,initialize,StateData,0}. - -%% @private -initialize(timeout, StateData0=#state{input=Input, bucket=Bucket, ring=Ring, req_id=ReqId, timeout=Timeout}) -> - BucketProps = riak_core_bucket:get_bucket(Bucket, Ring), - N = proplists:get_value(n_val,BucketProps), - PLS0 = riak_core_ring:all_preflists(Ring,N), - {LA1, LA2} = lists:partition(fun({A,_B}) -> - A rem N == 0 orelse A rem (N + 1) == 0 - end, - lists:zip(lists:seq(0,(length(PLS0)-1)), PLS0)), - {_, PLS} = lists:unzip(LA1 ++ LA2), - Simul_PLS = trunc(length(PLS) / N), - Listers = start_listers(ReqId, Input, Timeout), - StateData = StateData0#state{pls=PLS,simul_pls=Simul_PLS, listers=Listers, - wait_pls=[],vns=sets:from_list([])}, - %% Make sure there are actually some nodes available - %% to perform the key listing operations. 
- case Listers of - [] -> - %% No nodes are currently available so return - %% an error back to the requesting party. - finish(StateData); - _ -> - reduce_pls(StateData) - end. - -waiting_kl({ReqId, {kl, _Idx, Keys}}, - StateData=#state{bloom=Bloom, - req_id=ReqId,client=Client,timeout=Timeout, - bucket=Bucket,client_type=ClientType}) -> - process_keys(Keys,Bucket,ClientType,Bloom,ReqId,Client), - {next_state, waiting_kl, StateData, Timeout}; - -waiting_kl({ReqId, Idx, done}, StateData0=#state{wait_pls=WPL0,vns=VNS0,pls=PLS, - req_id=ReqId,timeout=Timeout}) -> - WPL = [{W_Idx,W_Node,W_PL} || {W_Idx,W_Node,W_PL} <- WPL0, W_Idx /= Idx], - WNs = [W_Node || {W_Idx,W_Node,_W_PL} <- WPL0, W_Idx =:= Idx], - Node = case WNs of - [WN] -> WN; - _ -> undefined - end, - VNS = sets:add_element({Idx,Node},VNS0), - StateData = StateData0#state{wait_pls=WPL,vns=VNS}, - case PLS of - [] -> - case WPL of - [] -> finish(StateData); - _ -> {next_state, waiting_kl, StateData, Timeout} - end; - _ -> reduce_pls(StateData) - end; - - -waiting_kl(timeout, StateData=#state{pls=PLS,wait_pls=WPL}) -> - NewPLS = lists:append(PLS, [W_PL || {_W_Idx,_W_Node,W_PL} <- WPL]), - reduce_pls(StateData#state{pls=NewPLS,wait_pls=[]}). - -finish(StateData=#state{req_id=ReqId,client=Client,client_type=ClientType, listers=[]}) -> - case ClientType of - mapred -> - %% No nodes are available for key listing so all - %% we can do now is die so that the rest of the - %% MapReduce processes will also die and be cleaned up. - exit(all_nodes_unavailable); - plain -> - %%Notify the requesting client that the key - %% listing is complete or that no nodes are - %% available to fulfil the request. - Client ! {ReqId, all_nodes_unavailable} - end, - {stop,normal,StateData}; -finish(StateData=#state{req_id=ReqId,client=Client,client_type=ClientType}) -> - case ClientType of - mapred -> - luke_flow:finish_inputs(Client); - plain -> - Client ! {ReqId, done} - end, - {stop,normal,StateData}. - -reduce_pls(StateData0=#state{timeout=Timeout, wait_pls=WPL, - listers=Listers, simul_pls=Simul_PLS}) -> - case find_free_pl(StateData0) of - {none_free,NewPLS} -> - StateData = StateData0#state{pls=NewPLS}, - case NewPLS =:= [] andalso WPL =:= [] of - true -> finish(StateData); - false -> {next_state, waiting_kl, StateData, Timeout} - end; - {[{Idx,Node}|RestPL],PLS} -> - case riak_core_node_watcher:services(Node) of - [] -> - reduce_pls(StateData0#state{pls=[RestPL|PLS]}); - _ -> - %% Look up keylister for that node - case proplists:get_value(Node, Listers) of - undefined -> - %% Node is down or hasn't been removed from preflists yet - %% Log a warning, skip the node and continue sending - %% out key list requests - error_logger:warning_msg("Skipping keylist request for unknown node: ~p~n", [Node]), - WaitPLS = [{Idx,Node,RestPL}|WPL], - StateData = StateData0#state{pls=PLS, wait_pls=WaitPLS}, - reduce_pls(StateData); - LPid -> - %% Send the keylist request to the lister - riak_kv_keylister_legacy:list_keys(LPid, {Idx, Node}), - WaitPLS = [{Idx,Node,RestPL}|WPL], - StateData = StateData0#state{pls=PLS, wait_pls=WaitPLS}, - case length(WaitPLS) > Simul_PLS of - true -> - {next_state, waiting_kl, StateData, Timeout}; - false -> - reduce_pls(StateData) - end - end - end - end. - -find_free_pl(StateData) -> find_free_pl1(StateData, []). 
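reduce_pls/1 above throttles how many preference lists have listings in flight (simul_pls) and relies on check_pl/3, just below, to classify each candidate preflist with two set-disjointness tests. A condensed sketch of that classification (argument names invented):

    %% redundant: one of the preflist's vnodes already answered;
    %% notfree: one of its nodes has a listing in flight; otherwise free.
    classify(PL, AnsweredVnodes, BusyNodes) ->
        case sets:is_disjoint(sets:from_list(PL), AnsweredVnodes) of
            false -> redundant;
            true ->
                Nodes = sets:from_list([Node || {_Idx, Node} <- PL]),
                case sets:is_disjoint(Nodes, BusyNodes) of
                    false -> notfree;
                    true  -> free
                end
        end.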
-find_free_pl1(_StateData=#state{pls=[]}, NotFree) -> {none_free,NotFree}; -find_free_pl1(StateData=#state{wait_pls=WPL,pls=[PL|PLS],vns=VNS}, NotFree) -> - case PL of - [] -> find_free_pl1(StateData#state{pls=PLS}, NotFree); - _ -> - case check_pl(PL,VNS,WPL) of - redundant -> find_free_pl1(StateData#state{pls=PLS},NotFree); - notfree -> find_free_pl1(StateData#state{pls=PLS},[PL|NotFree]); - free -> {PL,lists:append(PLS,NotFree)} - end - end. - -check_pl(PL,VNS,WPL) -> - case sets:is_disjoint(sets:from_list(PL),VNS) of - false -> redundant; - true -> - PL_Nodes = sets:from_list([Node || {_Idx,Node} <- PL]), - WaitNodes = sets:from_list([Node || {_Idx,Node,_RestPL} <- WPL]), - case sets:is_disjoint(PL_Nodes,WaitNodes) of - false -> notfree; - true -> free - end - end. - -%% @private -process_keys(Keys,Bucket,ClientType,Bloom,ReqId,Client) -> - process_keys(Keys,Bucket,ClientType,Bloom,ReqId,Client,[]). -%% @private -process_keys([],Bucket,ClientType,_Bloom,ReqId,Client,Acc) -> - case ClientType of - mapred -> - try - luke_flow:add_inputs(Client, [{Bucket,K} || K <- Acc]) - catch _:_ -> - exit(self(), normal) - end; - plain -> Client ! {ReqId, {keys, Acc}} - end, - ok; -process_keys([K|Rest],Bucket,ClientType,Bloom,ReqId,Client,Acc) -> - case ebloom:contains(Bloom,K) of - true -> - process_keys(Rest,Bucket,ClientType, - Bloom,ReqId,Client,Acc); - false -> - ebloom:insert(Bloom,K), - process_keys(Rest,Bucket,ClientType, - Bloom,ReqId,Client,[K|Acc]) - end. - -%% @private -handle_event(_Event, _StateName, StateData) -> - {stop,badmsg,StateData}. - -%% @private -handle_sync_event(_Event, _From, _StateName, StateData) -> - {stop,badmsg,StateData}. - -%% @private -handle_info({'EXIT', Pid, Reason}, _StateName, #state{client=Pid}=StateData) -> - {stop,Reason,StateData}; -handle_info({_ReqId, {ok, _Pid}}, StateName, StateData=#state{timeout=Timeout}) -> - %% Received a message from a key lister node that - %% did not start up within the timeout. Just ignore - %% the message and move on. - {next_state, StateName, StateData, Timeout}; -handle_info(_Info, _StateName, StateData) -> - {stop,badmsg,StateData}. - -%% @private -terminate(Reason, _StateName, #state{bloom=Bloom}) -> - ebloom:clear(Bloom), - Reason. - -%% @private -code_change(_OldVsn, StateName, State, _Extra) -> - {ok, StateName, State}. - -%% @private -start_listers(ReqId, Bucket, Timeout) -> - Nodes = riak_core_node_watcher:nodes(riak_kv), - start_listers(Nodes, ReqId, Bucket, Timeout, []). - -start_listers([], _ReqId, _Bucket, _Timeout, Accum) -> - Accum; -start_listers([H|T], ReqId, Bucket, Timeout, Accum) -> - case riak_kv_keylister_master:start_keylist(H, ReqId, Bucket, Timeout) of - {ok, Pid} -> - start_listers(T, ReqId, Bucket, Timeout, [{H, Pid}|Accum]); - _Error -> - start_listers(T, ReqId, Bucket, Timeout, Accum) - end. diff --git a/src/riak_kv_keys_fsm_legacy_sup.erl b/src/riak_kv_keys_fsm_legacy_sup.erl deleted file mode 100644 index 03af802e3b..0000000000 --- a/src/riak_kv_keys_fsm_legacy_sup.erl +++ /dev/null @@ -1,49 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% riak_kv_keys_fsm_legacy_sup: supervise the legacy riak_kv keys -%% state machines. -%% -%% Copyright (c) 2007-2011 Basho Technologies, Inc. All Rights Reserved. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. 
You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- - -%% @doc supervise the legacy riak_kv keys state machines - --module(riak_kv_keys_fsm_legacy_sup). - --behaviour(supervisor). - --export([start_keys_fsm/2]). --export([start_link/0]). --export([init/1]). - -start_keys_fsm(Node, Args) -> - supervisor:start_child({?MODULE, Node}, Args). - -%% @spec start_link() -> ServerRet -%% @doc API for starting the supervisor. -start_link() -> - supervisor:start_link({local, ?MODULE}, ?MODULE, []). - -%% @spec init([]) -> SupervisorTree -%% @doc supervisor callback. -init([]) -> - KeysFsmSpec = {undefined, - {riak_kv_keys_fsm_legacy, start_link, []}, - temporary, 5000, worker, [riak_kv_keys_fsm_legacy]}, - - {ok, {{simple_one_for_one, 10, 10}, [KeysFsmSpec]}}. diff --git a/src/riak_kv_legacy_vnode.erl b/src/riak_kv_legacy_vnode.erl index 399aabc336..3742087fa4 100644 --- a/src/riak_kv_legacy_vnode.erl +++ b/src/riak_kv_legacy_vnode.erl @@ -71,9 +71,10 @@ rewrite_cast({vnode_get, {Partition,_Node}, rewrite_cast({vnode_list_bucket, {Partition,_Node}, {FSM_pid, Bucket, ReqID}}) -> Req = riak_core_vnode_master:make_request( - #riak_kv_listkeys_req_v1{ + #riak_kv_listkeys_req_v2{ bucket=Bucket, - req_id=ReqID}, + req_id=ReqID, + caller=self()}, {fsm, undefined, FSM_pid}, Partition), {ok, Req}. @@ -167,7 +168,7 @@ legacy_kv_test_() -> ReqId = 456, RealStartTime = {0,0,0}, Options = [], - send_0_11_0_cmd(vnode_put, + send_0_11_0_cmd(vnode_put, {self(), {Bucket,Key}, RObj1, ReqId, RealStartTime, Options}), receive Msg -> @@ -202,9 +203,7 @@ legacy_kv_test_() -> send_0_11_0_cmd(Cmd, Msg) -> gen_server:cast({riak_kv_vnode_master, node()}, {Cmd, {0, node()}, Msg}). - + -endif. % BROKEN_EUNIT -endif. - - diff --git a/src/riak_kv_lru.erl b/src/riak_kv_lru.erl deleted file mode 100644 index 2c75664090..0000000000 --- a/src/riak_kv_lru.erl +++ /dev/null @@ -1,352 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% riak_kv_lru: ETS-based LRU cache -%% -%% Copyright (c) 2007-2010 Basho Technologies, Inc. All Rights Reserved. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- --module(riak_kv_lru). - --ifdef(TEST). --include_lib("eunit/include/eunit.hrl"). --endif. - --export([new/1, - put/4, - remove/3, - fetch/3, - size/1, - max_size/1, - clear/1, - clear_bkey/2, - destroy/1, - table_sizes/1]). --export([init/1, - handle_call/3, - handle_cast/2, - handle_info/2, - terminate/2]). 
- --record(kv_lru, {max_size, - bucket_idx, - age_idx, - cache}). - --record(kv_lru_entry, {key, - value, - ts}). - -new(0) -> - nocache; -new(Size) -> - {ok, Pid} = gen_server:start_link(?MODULE, [Size], []), - Pid. - -put(nocache, _BKey, _Key, _Value) -> - ok; -put(Pid, BKey, Key, Value) -> - gen_server:cast(Pid, {put, BKey, Key, Value}). - -fetch(nocache, _BKey, _Key) -> - notfound; -fetch(Pid, BKey, Key) -> - gen_server:call(Pid, {fetch, BKey, Key}). - -remove(nocache, _BKey, _Key) -> - ok; -remove(Pid, BKey, Key) -> - gen_server:cast(Pid, {remove, BKey, Key}). - -size(nocache) -> - 0; -size(Pid) -> - gen_server:call(Pid, size). - -max_size(nocache) -> - 0; -max_size(Pid) -> - gen_server:call(Pid, max_size). - -clear(nocache) -> - ok; -clear(Pid) -> - gen_server:cast(Pid, clear). - -clear_bkey(nocache, _BKey) -> - ok; -clear_bkey(Pid, BKey) -> - gen_server:cast(Pid, {clear_bkey, BKey}). - -destroy(nocache) -> - ok; -destroy(Pid) -> - gen_server:call(Pid, destroy). - -%% for test usage -table_sizes(Pid) -> - gen_server:call(Pid, table_sizes). - -init([Size]) -> - IdxName = pid_to_list(self()) ++ "_cache_age_idx", - BucketIdxName = pid_to_list(self()) ++ "_bucket_idx", - CacheName = pid_to_list(self()) ++ "_cache", - Idx = ets:new(list_to_atom(IdxName), [ordered_set, private]), - BucketIdx = ets:new(list_to_atom(BucketIdxName), [bag, private]), - Cache = ets:new(list_to_atom(CacheName), [private, {keypos, 2}]), - {ok, #kv_lru{max_size=Size, age_idx=Idx, bucket_idx=BucketIdx, cache=Cache}}. - -handle_call({fetch, BKey, Key}, _From, State) -> - Reply = fetch_internal(State, BKey, Key), - {reply, Reply, State}; -handle_call(size, _From, State) -> - Reply = size_internal(State), - {reply, Reply, State}; -handle_call(max_size, _From, State) -> - Reply = max_size_internal(State), - {reply, Reply, State}; -handle_call(destroy, _From, State) -> - {Reply, NewState} = destroy_internal(State), - {stop, normal, Reply, NewState}; -handle_call(table_sizes, _From, State) -> - Reply = table_sizes_internal(State), - {reply, Reply, State}; -handle_call(_Request, _From, State) -> - {reply, ok, State}. - -handle_cast({put, BKey, Key, Value}, State) -> - put_internal(State, BKey, Key, Value), - {noreply, State}; -handle_cast({remove, BKey, Key}, State) -> - remove_internal(State, BKey, Key), - {noreply, State}; -handle_cast(clear, State) -> - clear_internal(State), - {noreply, State}; -handle_cast({clear_bkey, BKey}, State) -> - clear_bkey_internal(State, BKey), - {noreply, State}; -handle_cast(_Request, State) -> - {noreply, State}. - -handle_info(_Info, State) -> - {noreply, State}. - -terminate(_Reason, State) -> - destroy_internal(State), - ok. - -put_internal(#kv_lru{max_size=MaxSize, age_idx=Idx, - bucket_idx=BucketIdx, cache=Cache}, - BKey, Key, Value) -> - remove_existing(Idx, BucketIdx, Cache, BKey, Key), - insert_value(Idx, BucketIdx, Cache, BKey, Key, Value), - prune_oldest_if_needed(MaxSize, Idx, BucketIdx, Cache). - -fetch_internal(#kv_lru{cache=Cache}=LRU, BKey, Key) -> - case fetch_value(Cache, BKey, Key) of - notfound -> - notfound; - Value -> - %% Do a put to update the timestamp in the cache - put_internal(LRU, BKey, Key, Value), - Value - end. - -remove_internal(#kv_lru{age_idx=Idx, bucket_idx=BucketIdx, cache=Cache}, - BKey, Key) -> - remove_existing(Idx, BucketIdx, Cache, BKey, Key), - ok. - -size_internal(#kv_lru{age_idx=Idx}) -> - ets:info(Idx, size). - -max_size_internal(#kv_lru{max_size=MaxSize}) -> - MaxSize. 
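The deleted cache spreads its state over three ETS tables: an ordered_set age index keyed by insertion timestamp, a bag from bucket to cache keys, and the cache proper keyed on the entry record. Because the age index is an ordered_set, evicting the least-recently-used entry is just a lookup of its smallest key, as prune_oldest_if_needed/4 below does. A condensed sketch of that eviction step (table handles invented):

    evict_oldest(AgeIdx, Cache) ->
        case ets:first(AgeIdx) of          % smallest timestamp = oldest entry
            '$end_of_table' ->
                empty;
            OldestTS ->
                [{OldestTS, CacheKey}] = ets:lookup(AgeIdx, OldestTS),
                true = ets:delete(AgeIdx, OldestTS),
                true = ets:delete(Cache, CacheKey),
                {evicted, CacheKey}
        end.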
- -clear_internal(#kv_lru{age_idx=Idx, cache=Cache}) -> - ets:delete_all_objects(Idx), - ets:delete_all_objects(Cache), - ok. - -clear_bkey_internal(#kv_lru{bucket_idx=BucketIdx}=LRU, BKey) -> - case ets:lookup(BucketIdx, BKey) of - [] -> - ok; - BK_Ks -> - [remove_internal(LRU, BKey, Key) || {_BKey, Key} <- BK_Ks], - ok - end. - -destroy_internal(#kv_lru{age_idx=undefined, bucket_idx=undefined, cache=undefined}=State) -> - {ok, State}; -destroy_internal(#kv_lru{age_idx=Idx, bucket_idx=BucketIdx, cache=Cache}) -> - ets:delete(Idx), - ets:delete(BucketIdx), - ets:delete(Cache), - {ok, #kv_lru{age_idx=undefined, bucket_idx=undefined, cache=undefined}}. - -table_sizes_internal(#kv_lru{age_idx=Idx, bucket_idx=BucketIdx, cache=Cache}) -> - [{age_idx, ets:info(Idx, size)}, - {bucket_idx, ets:info(BucketIdx, size)}, - {cache, ets:info(Cache, size)}]. - -%% Internal functions -remove_existing(Idx, BucketIdx, Cache, BKey, Key) -> - CacheKey = {BKey, Key}, - case ets:lookup(Cache, CacheKey) of - [Entry] -> - ets:delete(Idx, Entry#kv_lru_entry.ts), - ets:delete_object(BucketIdx, CacheKey), - ets:delete(Cache, CacheKey), - ok; - [] -> - ok - end. - -insert_value(Idx, BucketIdx, Cache, BKey, Key, Value) -> - CacheKey = {BKey, Key}, - TS = erlang:now(), - Entry = #kv_lru_entry{key=CacheKey, value=Value, ts=TS}, - ets:insert_new(Cache, Entry), - ets:insert_new(Idx, {TS, CacheKey}), - ets:insert(BucketIdx, CacheKey). - -prune_oldest_if_needed(MaxSize, Idx, BucketIdx, Cache) -> - OverSize = MaxSize + 1, - case ets:info(Idx, size) of - OverSize -> - TS = ets:first(Idx), - [{TS, {BKey, Key}}] = ets:lookup(Idx, TS), - remove_existing(Idx, BucketIdx, Cache, BKey, Key), - ok; - _ -> - ok - end. - -fetch_value(Cache, BKey, Key) -> - CacheKey = {BKey, Key}, - case ets:lookup(Cache, CacheKey) of - [] -> - notfound; - [Entry] -> - Entry#kv_lru_entry.value - end. - --ifdef(TEST). -put_fetch_test() -> - BKey = {<<"test">>, <<"foo">>}, - C = riak_kv_lru:new(5), - riak_kv_lru:put(C, BKey, <<"hello">>, <<"world">>), - <<"world">> = riak_kv_lru:fetch(C, BKey, <<"hello">>), - riak_kv_lru:destroy(C). - -delete_test() -> - BKey = {<<"test">>, <<"foo">>}, - C = riak_kv_lru:new(5), - riak_kv_lru:put(C, BKey, "hello", "world"), - riak_kv_lru:remove(C, BKey, "hello"), - notfound = riak_kv_lru:fetch(C, BKey, "hello"), - riak_kv_lru:destroy(C). - -size_test() -> - BKey = {<<"test">>, <<"foo">>}, - C = riak_kv_lru:new(5), - [riak_kv_lru:put(C, BKey, X, X) || X <- lists:seq(1, 6)], - notfound = riak_kv_lru:fetch(C, BKey, 1), - 5 = riak_kv_lru:size(C), - 5 = riak_kv_lru:max_size(C), - 2 = riak_kv_lru:fetch(C, BKey, 2), - 6 = riak_kv_lru:fetch(C, BKey, 6), - riak_kv_lru:destroy(C). - -age_test() -> - BKey = {<<"test">>, <<"foo">>}, - C = riak_kv_lru:new(3), - [riak_kv_lru:put(C, BKey, X, X) || X <- lists:seq(1, 3)], - timer:sleep(500), - 2 = riak_kv_lru:fetch(C, BKey, 2), - riak_kv_lru:put(C, BKey, 4, 4), - 2 = riak_kv_lru:fetch(C, BKey, 2), - 4 = riak_kv_lru:fetch(C, BKey, 4), - notfound = riak_kv_lru:fetch(C, BKey, 1), - riak_kv_lru:destroy(C). - -clear_bkey_test() -> - BKey1 = {<<"test">>, <<"foo">>}, - BKey2 = {<<"test">>, <<"bar">>}, - C = riak_kv_lru:new(10), - F = fun(X) -> - riak_kv_lru:put(C, BKey1, X, X), - riak_kv_lru:put(C, BKey2, X, X) end, - [F(X) || X <- lists:seq(1, 5)], - riak_kv_lru:clear_bkey(C, BKey2), - notfound = riak_kv_lru:fetch(C, BKey2, 3), - 3 = riak_kv_lru:fetch(C, BKey1, 3), - riak_kv_lru:destroy(C). 
- -zero_size_test() -> - BKey = {<<"test">>, <<"foo">>}, - C = riak_kv_lru:new(0), - ok = riak_kv_lru:put(C, BKey, 1, 1), - notfound = riak_kv_lru:fetch(C, BKey, 1), - 0 = riak_kv_lru:size(C), - riak_kv_lru:destroy(C). - -consistency_test() -> - BKey = {<<"test">>, <<"foo">>}, - C = riak_kv_lru:new(3), - F = fun(X) -> - riak_kv_lru:put(C, BKey, X, X) - end, - [F(X) || X <- lists:seq(1,10)], - consistency_check(C). - -%% Make sure that riak_kv_lru is correct under concurrent modification -%% by spawning 10 processes that each do 1000 puts on the same LRU, then -%% checking that the size limit of the cache has been respected -%% (added to check https://issues.basho.com/show_bug.cgi?id=969) -concurrency_test() -> - Size = 10, - C = riak_kv_lru:new(Size), - Pids = [ spawn_link(concurrent_incrementer(C, K, self())) - || K <- lists:seq(10000, 10010) ], - wait_for_incrementers(Pids), - consistency_check(C), - ?assertEqual(Size, riak_kv_lru:size(C)). - -concurrent_incrementer(C, K, Test) -> - fun() -> - [ riak_kv_lru:put(C, N+K, N+K, N+K) - || N <- lists:seq(1, 1000) ], - Test ! {increment_done, self()} - end. - -wait_for_incrementers([]) -> ok; -wait_for_incrementers(Pids) -> - receive {increment_done, Pid} -> - wait_for_incrementers(lists:delete(Pid, Pids)) - after 5000 -> - throw(incrementer_timeout) - end. - -consistency_check(LRU) -> - Ts = table_sizes(LRU), - %% make sure all tables report same size - UniqueSizes = lists:usort([ Size || {_Name, Size} <- Ts]), - ?assertEqual(1, length(UniqueSizes)). - --endif. diff --git a/src/riak_kv_map_master.erl b/src/riak_kv_map_master.erl deleted file mode 100644 index dd9f2c1a8c..0000000000 --- a/src/riak_kv_map_master.erl +++ /dev/null @@ -1,262 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% riak_map_master: spins up batched map tasks on behalf of map phases -%% -%% Copyright (c) 2007-2010 Basho Technologies, Inc. All Rights Reserved. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- - --module(riak_kv_map_master). --include_lib("riak_kv_js_pools.hrl"). - --behaviour(gen_server2). - -%% API --export([start_link/0, - queue_depth/0, - new_mapper/4]). - -%% gen_server callbacks --export([init/1, handle_call/3, handle_cast/2, handle_info/2, - terminate/2, code_change/3]). - --define(SERVER, ?MODULE). - --record(mapper, {vnode, - qterm, - inputs, - phase}). - --record(state, {datadir, - store, - highest, - next}). - -new_mapper({_, Node}=VNode, QTerm, MapInputs, PhasePid) -> - gen_server2:pcall({?SERVER, Node}, 5, {new_mapper, VNode, - QTerm, MapInputs, PhasePid}, infinity). - -queue_depth() -> - Nodes = [node()|nodes()], - [{Node, gen_server2:pcall({?SERVER, Node}, 0, queue_depth, - infinity)} || Node <- Nodes]. - - -start_link() -> - gen_server2:start_link({local, ?SERVER}, ?MODULE, [], []). 
- -init([]) -> - process_flag(trap_exit, true), - DataDir = init_data_dir(), - Store = bitcask:open(DataDir, [read_write]), - {ok, NextCounter} = file:open(filename:join(DataDir, "_next_"), [read, write, raw, binary]), - {ok, HighestCounter} = file:open(filename:join(DataDir, "_highest_"), [read, write, raw, binary]), - State = #state{datadir=DataDir, store=Store, highest=HighestCounter, - next=NextCounter}, - reset_counters(State), - timer:send_interval(60000, merge_storage), - {ok, State}. - -handle_call({new_mapper, VNode, {erlang, _}=QTerm, MapInputs, PhasePid}, _From, State) -> - Id = make_id(), - case riak_kv_mapper_sup:new_mapper(VNode, Id, QTerm, MapInputs, PhasePid) of - {ok, _Pid} -> - {reply, {ok, Id}, State}; - {error, Reason} -> - {reply, {error, Reason}, State} - end; - -handle_call({new_mapper, VNode, {javascript, _}=QTerm, MapInputs, PhasePid}, _From, State) -> - case riak_kv_js_manager:pool_size(?JSPOOL_MAP) > 0 of - true -> - Id = make_id(), - case riak_kv_mapper_sup:new_mapper(VNode, Id, QTerm, MapInputs, PhasePid) of - {ok, Pid} -> - erlang:monitor(process, Pid), - {reply, {ok, Id}, State}; - {error, Reason} -> - {reply, {error, Reason}, State} - end; - false -> - Id = defer_mapper(VNode, QTerm, MapInputs, PhasePid, State), - {reply, {ok, {Id, node()}}, State} - end; - -handle_call(queue_depth, _From, #state{highest=Highest, next=Next}=State) -> - H = read_counter(Highest), - N = read_counter(Next), - Reply = H - N, - {reply, Reply, State}; - -handle_call(_Request, _From, State) -> - {reply, ignore, State}. - -handle_cast(_Msg, State) -> - {noreply, State}. - -%% Dequeue a deferred mapper when a mapper exits -handle_info({'DOWN', _A, _B, _Mapper, _C}, State) -> - dequeue_mapper(State), - {noreply, State}; - -handle_info(merge_storage, #state{store=Store, datadir=DataDir}=State) -> - case bitcask:needs_merge(Store) of - {true, Files} -> - bitcask_merge_worker:merge(DataDir, [], Files); - false -> - ok - end, - {noreply, State}; - -handle_info(_Info, State) -> - {noreply, State}. - -terminate(_Reason, #state{store=Store, highest=Highest, next=Next}) -> - file:close(Highest), - file:close(Next), - bitcask:close(Store). - -code_change(_OldVsn, State, _Extra) -> - {ok, State}. - -%% Internal functions -make_id() -> - {_, _, T3} = erlang:now(), - {T3, node()}. - -dequeue_mapper(State) -> - case are_mappers_waiting(State) of - false -> - ok; - true -> - Id = read(State#state.next), - Mapper = read_entry(Id, State#state.store), - case is_mapper_runnable(Mapper) of - false -> - incr(State#state.next), - delete_entry(Id, State#state.store), - dequeue_mapper(State); - true -> - #mapper{vnode=VNode, qterm=QTerm, - inputs=MapInputs, phase=Phase} = Mapper, - case riak_kv_js_manager:pool_size(?JSPOOL_MAP) > 0 of - true -> - {ok, Pid} = riak_kv_mapper_sup:new_mapper(VNode, {Id, node()}, QTerm, - MapInputs, Phase), - erlang:monitor(process, Pid), - incr(State#state.next), - delete_entry(Id, State#state.store), - dequeue_mapper(State); - false -> - ok - end - end - end. - -defer_mapper(VNode, QTerm, MapInputs, PhasePid, State) -> - Mapper = #mapper{vnode=VNode, qterm=QTerm, inputs=MapInputs, phase=PhasePid}, - Id = read_incr(State#state.highest), - write_entry(Id, Mapper, State#state.store). - -reset_counters(State) -> - case are_mappers_waiting(State) of - false -> - file:pwrite(State#state.highest, 0, <<0:64>>), - file:sync(State#state.highest), - file:pwrite(State#state.next, 0, <<0:64>>), - file:sync(State#state.next); - true -> - dequeue_mapper(State) - end. 
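The deferred-mapper queue is persisted in bitcask and bounded by two monotonic counters (highest id written, next id to run), each stored as a big-endian unsigned 64-bit integer at offset 0 of its own file, as the read/incr helpers below show. A sketch of reloading such a counter after a restart (function name and path handling are invented):

    load_counter(Path) ->
        {ok, Fd} = file:open(Path, [read, write, raw, binary]),
        Value = case file:pread(Fd, 0, 8) of
                    eof -> 0;                   % fresh file, start from zero
                    {ok, <<V:64>>} -> V
                end,
        {ok, Fd, Value}.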
-
-read(CounterFile) ->
-    Counter = read_counter(CounterFile),
-    list_to_binary(integer_to_list(Counter)).
-
-incr(CounterFile) ->
-    Counter = read_counter(CounterFile),
-    NewCounter = Counter + 1,
-    ok = file:pwrite(CounterFile, 0, <<NewCounter:64>>),
-    file:sync(CounterFile).
-
-read_incr(CounterFile) ->
-    Counter = read_counter(CounterFile),
-    NewCounter = Counter + 1,
-    ok = file:pwrite(CounterFile, 0, <<NewCounter:64>>),
-    file:sync(CounterFile),
-    list_to_binary(integer_to_list(Counter)).
-
-read_counter(Counter) ->
-    case file:pread(Counter, 0, 8) of
-        eof ->
-            0;
-        {ok, Data} ->
-            <<V:64>> = Data,
-            V;
-        Error ->
-            throw(Error)
-    end.
-
-are_mappers_waiting(State) ->
-    Highest = read_counter(State#state.highest),
-    Next = read_counter(State#state.next),
-    Next < Highest.
-
-is_mapper_runnable({error,_}) -> false;
-is_mapper_runnable(not_found) -> false;
-is_mapper_runnable(#mapper{phase=Phase}) ->
-    Node = node(Phase),
-    ClusterNodes = riak_core_node_watcher:nodes(riak_kv),
-    lists:member(Node, ClusterNodes) andalso rpc:call(Node, erlang, is_process_alive,
-                                                      [Phase]).
-
-write_entry(Id, Mapper, Store) ->
-    ok = bitcask:put(Store, Id, term_to_binary(Mapper, [compressed])),
-    Id.
-
-read_entry(Id, Store) ->
-    case bitcask:get(Store, Id) of
-        {ok, D} -> binary_to_term(D);
-        Err -> Err
-    end.
-
-delete_entry(Id, Store) ->
-    bitcask:delete(Store, Id).
-
-ensure_dir(Dir) ->
-    filelib:ensure_dir(filename:join(Dir, ".empty")).
-
-init_data_dir() ->
-    %% There are some upgrade situations where the mapred_queue_dir, is not
-    %% specified and as such we'll wind up using the mr_queue dir,
-    %% relative to platform_data_dir.
-    %% We fallback to creating the mr_queue in /tmp.
-    P_DataDir = app_helper:get_env(riak_core, platform_data_dir),
-    DataDir0 = app_helper:get_env(riak_kv, mapred_queue_dir,
-                                  filename:join(P_DataDir, "mr_queue")),
-    case ensure_dir(DataDir0) of
-        ok ->
-            DataDir0;
-        {error, Reason} ->
-            TmpDir = "/tmp/mr_queue",
-            lager:warning("Failed to create ~p for mapred_queue_dir "
-                          "defaulting to %s: ~p",
-                          [DataDir0, TmpDir, Reason]),
-            ok = ensure_dir(TmpDir),
-            TmpDir
-    end.
-
diff --git a/src/riak_kv_map_phase.erl b/src/riak_kv_map_phase.erl
deleted file mode 100644
index de74c814f7..0000000000
--- a/src/riak_kv_map_phase.erl
+++ /dev/null
@@ -1,301 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% riak_map_phase: manage the mechanics of a map phase of a MR job
-%%
-%% Copyright (c) 2007-2010 Basho Technologies, Inc. All Rights Reserved.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
-
--module(riak_kv_map_phase).
--author('Kevin Smith ').
--author('John Muellerleile ').
-
--include("riak_kv_map_phase.hrl").
-
--behaviour(luke_phase).
-
--export([init/1, handle_input/3, handle_input_done/1, handle_event/2,
-         handle_sync_event/3, handle_info/2, handle_timeout/1, terminate/2]).
-
--record(state, {done=false, qterm, fsms=dict:new(), mapper_data=[], pending=[]}).
- -init([QTerm]) -> - process_flag(trap_exit, true), - {ok, #state{qterm=QTerm}}. - -handle_input(Inputs0, #state{fsms=FSMs0, qterm=QTerm, mapper_data=MapperData}=State, _Timeout) -> - {ok, Ring} = riak_core_ring_manager:get_my_ring(), - Inputs1 = [build_input(I, Ring) || I <- Inputs0], - case length(Inputs1) > 0 of - true -> - ClaimLists = riak_kv_mapred_planner:plan_map(Inputs1), - case schedule_input(Inputs1, ClaimLists, QTerm, FSMs0, State) of - {NewFSMs, _ClaimLists1, FsmKeys} -> - MapperData1 = MapperData ++ FsmKeys, - {no_output, State#state{fsms=NewFSMs, mapper_data=MapperData1}}; - {error, exhausted_preflist} -> - {stop, {error, {no_candidate_nodes, exhausted_prefist, erlang:get_stacktrace(), MapperData}}, State} - end; - false -> - {no_output, State} - end. - -handle_input_done(State) -> - maybe_done(State#state{done=true}). - -handle_event({register_mapper, Id, MapperPid}, #state{mapper_data=MapperData}=State) -> - MapperData0 = case lists:keyfind(Id, 1, MapperData) of - {Id, MapperProps} -> lists:keyreplace(Id, 1, MapperData, {Id, [{pid, MapperPid}|MapperProps]}); - false -> MapperData - end, - MapperData1 = MapperData0 ++ [{MapperPid, Id}], - {no_output, State#state{mapper_data=MapperData1}}; - -handle_event({mapexec_reply, VNode, BKey, Reply, Executor}, #state{fsms=FSMs, mapper_data=MapperData, - pending=Pending}=State) -> - case dict:is_key(Executor, FSMs) of - false -> - %% node retry case will produce dictionary miss - maybe_done(State); - true -> - case Reply of - [{not_found, _, _}] -> - handle_not_found_reply(VNode, BKey, Executor, State, Reply); - [{error, notfound}] -> - handle_not_found_reply(VNode, BKey, Executor, State, Reply); - _ -> - Pending1 = Pending ++ Reply, - FSMs1 = update_counter(Executor, FSMs), - MapperData1 = update_inputs(Executor, VNode, BKey, MapperData), - maybe_done(State#state{fsms=FSMs1, mapper_data=MapperData1, pending=Pending1}) - end - - end; - -handle_event({mapexec_error, _Executor, Reply}, State) -> - %{no_output, State}; - {stop, Reply, State#state{fsms=[]}}; -handle_event(_Event, State) -> - {no_output, State}. - - -handle_info({'EXIT', Pid, _Reason}, #state{mapper_data=MapperData, fsms=FSMs, qterm=QTerm}=State) -> - case lists:keyfind(Pid, 1, MapperData) of - {Pid, Id} -> - case lists:keyfind(Id, 1, MapperData) of - {Id, MapperProps} -> - {keys, {VNode, Keys}} = lists:keyfind(keys, 1, MapperProps), - case length(Keys) of - 0 -> - MapperData1 = lists:keydelete(Id, 1, lists:keydelete(Pid, 1, MapperData)), - maybe_done(State#state{mapper_data=MapperData1}); - _C -> - try - {_Partition, BadNode} = VNode, - NewKeys = prune_input_nodes(Keys, BadNode), - ClaimLists = riak_kv_mapred_planner:plan_map(NewKeys), - case schedule_input(NewKeys, ClaimLists, QTerm, FSMs, State) of - {NewFSMs, _ClaimLists1, FsmKeys} -> - MapperData1 = lists:keydelete(Id, 1, lists:keydelete(Pid, 1, MapperData ++ FsmKeys)), - maybe_done(State#state{mapper_data=MapperData1, fsms=NewFSMs}); - {error, exhausted_preflist} -> - MapperData1 = lists:keydelete(Id, 1, lists:keydelete(Pid, 1, MapperData)), - maybe_done(State#state{mapper_data=MapperData1, fsms=FSMs}) - end - catch - _:Error -> - {stop, {error, {no_candidate_nodes, Error, erlang:get_stacktrace(), MapperData}}, State} - end - end; - false -> - MapperData1 = lists:keydelete(Pid, 1, MapperData), - maybe_done(State#state{mapper_data=MapperData1}) - end; - false -> - {stop, {error, {dead_mapper, erlang:get_stacktrace(), MapperData}}, State} - end; - -handle_info(_Info, State) -> - {no_output, State}. 
- -handle_sync_event(_Event, _From, State) -> - {reply, ignored, State}. - -handle_timeout(State) -> - {no_output, State}. - -terminate(_Reason, _State) -> - _Reason. - -%% Internal functions - -schedule_input(Inputs1, ClaimLists, QTerm, FSMs0, State) -> - try - {FSMs1, FsmKeys} = start_mappers(ClaimLists, QTerm, FSMs0, []), - {FSMs1, ClaimLists, FsmKeys} - catch - exit:{{nodedown, Node}, _} -> - Inputs2 = prune_input_nodes(Inputs1, Node), - try riak_kv_mapred_planner:plan_map(Inputs2) of - ClaimLists2 -> - schedule_input(Inputs2, ClaimLists2, QTerm, FSMs0, State) - catch exit:exhausted_preflist -> - {error, exhausted_preflist} - end; - Error -> - throw(Error) - end. - -prune_input_nodes(Inputs, BadNode) -> - prune_input_nodes(Inputs, BadNode, []). -prune_input_nodes([], _BadNode, NewInputs) -> - NewInputs; -prune_input_nodes([Input|T], BadNode, NewInputs) -> - #riak_kv_map_input{preflist=Targets} = Input, - Targets2 = lists:keydelete(BadNode, 2, Targets), - prune_input_nodes(T, BadNode, [Input#riak_kv_map_input{preflist=Targets2}|NewInputs]). - -prune_input_partitions(Inputs, BadNode) -> - prune_input_partitions(Inputs, BadNode, []). -prune_input_partitions([], _BadNode, NewInputs) -> - NewInputs; -prune_input_partitions([Input|T], BadPartition, NewInputs) -> - #riak_kv_map_input{preflist=Targets} = Input, - Targets2 = lists:keydelete(BadPartition, 1, Targets), - prune_input_partitions(T, BadPartition, [Input#riak_kv_map_input{preflist=Targets2}|NewInputs]). - - -build_input(I, Ring) -> - {{Bucket, Key}, KD} = convert_input(I), - Props = riak_core_bucket:get_bucket(Bucket, Ring), - {value, {_, NVal}} = lists:keysearch(n_val, 1, Props), - Idx = riak_core_util:chash_key({Bucket, Key}), - PL = riak_core_ring:preflist(Idx, Ring), - {Targets, _} = lists:split(NVal, PL), - #riak_kv_map_input{bkey={Bucket, Key}, - bprops=Props, - kd=KD, - preflist=Targets}. - -convert_input(I={{_B,_K},_D}) - when is_binary(_B) andalso (is_list(_K) orelse is_binary(_K)) -> I; -convert_input(I={_B,_K}) - when is_binary(_B) andalso (is_list(_K) orelse is_binary(_K)) -> {I,undefined}; -convert_input([B,K]) when is_binary(B), is_binary(K) -> {{B,K},undefined}; -convert_input([B,K,D]) when is_binary(B), is_binary(K) -> {{B,K},D}; -convert_input({struct, [{<<"not_found">>, - {struct, [{<<"bucket">>, Bucket}, - {<<"key">>, Key}]}}]}) -> - {{Bucket, Key}, undefined}; -convert_input({not_found, {Bucket, Key}, KD}) -> - {{Bucket, Key}, KD}; -convert_input(I) -> I. - -start_mappers([], _QTerm, Accum, FsmKeys) -> - {Accum, FsmKeys}; -start_mappers([{Partition, Inputs}|T], QTerm, Accum, FsmKeys) -> - case riak_kv_map_master:new_mapper(Partition, QTerm, Inputs, self()) of - {ok, FSM} -> - Accum1 = dict:store(FSM, length(Inputs), Accum), - start_mappers(T, QTerm, Accum1, FsmKeys ++ [{FSM, [{keys, {Partition, Inputs}}]}]); - Error -> - throw(Error) - end. - -update_counter(Executor, FSMs) -> - case dict:find(Executor, FSMs) of - {ok, 1} -> - dict:erase(Executor, FSMs); - {ok, _C} -> - dict:update_counter(Executor, -1, FSMs) - end. 
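update_counter/2 above uses the fsms dict as a countdown of outstanding inputs per mapper: each reply decrements the count, and the entry is erased once the last input is accounted for, which is what lets maybe_done/1 test dict:size(FSMs) == 0. The pattern in isolation:

    %% Decrement a per-key count, dropping the key at zero
    %% (a sketch of update_counter/2 above).
    dec(Key, Counts) ->
        case dict:find(Key, Counts) of
            {ok, 1} -> dict:erase(Key, Counts);
            {ok, _} -> dict:update_counter(Key, -1, Counts)
        end.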
- -maybe_done(#state{done=Done, fsms=FSMs, mapper_data=MapperData, pending=Pending}=State) -> - case Done =:= true andalso dict:size(FSMs) == 0 andalso MapperData == [] of - true -> - luke_phase:complete(), - case Pending of - [] -> - {no_output, State}; - _ -> - {output, Pending, State#state{pending=[]}} - end; - false -> - BatchSize = app_helper:get_env(riak_kv, mapper_batch_size, 5), - case length(Pending) >= BatchSize of - true -> - {output, Pending, State#state{pending=[]}}; - false -> - {no_output, State} - end - end. - -update_inputs(Id, VNode, BKey, MapperData) -> - case lists:keyfind(Id, 1, MapperData) of - {Id, MapperProps} -> - case lists:keyfind(keys, 1, MapperProps) of - {keys, {VNode, Keys}} -> - MapperProps1 = lists:keyreplace(keys, 1, MapperProps, - {keys, {VNode, lists:keydelete(BKey, 2, Keys)}}), - lists:keyreplace(Id, 1, MapperData, {Id, MapperProps1}); - false -> throw(bad_mapper_props_no_keys); - _ -> MapperData - end; - false -> throw(bad_mapper_props_no_id) - end. - -handle_not_found_reply(VNode, BKey, Executor, #state{fsms=FSMs, mapper_data=MapperData, qterm=QTerm, pending=Pending}=State, Reply) -> - %% If the reply is not_found, then check if there are other - %% preflist entries that can be tried before giving up. - - %% Look up the properties for the replying mapper - {_Id, MapperProps} = lists:keyfind(Executor, 1, MapperData), - %% Extract the vnode data and the list of inputs - {keys, {VNode, Keys}} = lists:keyfind(keys, 1, MapperProps), - - %% Remove the current partition from - %% the list of potential inputs. - {BadPartition, _Node} = VNode, - NewKeys = prune_input_partitions(Keys, BadPartition), - - %% Create a new map plan using a different preflist entry. - %% The call to plan_map will call exit with reason - %% exhausted_preflist if all the preference list - %% entries have been checked. - try riak_kv_mapred_planner:plan_map(NewKeys) of - ClaimLists -> - FSMs1 = dict:erase(Executor, FSMs), - case schedule_input(NewKeys, ClaimLists, QTerm, FSMs1, State) of - {NewFSMs, _ClaimLists1, FsmKeys} -> - MapperData1 = lists:keydelete(Executor, 1, MapperData ++ FsmKeys), - maybe_done(State#state{mapper_data=MapperData1, fsms=NewFSMs}); - {error, exhausted_preflist} -> - MapperData1 = lists:keydelete(Executor, 1, MapperData), - maybe_done(State#state{fsms=FSMs1, mapper_data=MapperData1, pending=Pending++Reply}) - end - catch - exit:exhausted_preflist -> - %% At this point the preflist has been exhausted - Pending1 = Pending ++ Reply, - FSMs2 = update_counter(Executor, FSMs), - MapperData2 = update_inputs(Executor, VNode, BKey, MapperData), - maybe_done(State#state{fsms=FSMs2, mapper_data=MapperData2, pending=Pending1}) - end. - - - - diff --git a/src/riak_kv_mapper.erl b/src/riak_kv_mapper.erl deleted file mode 100644 index 52308d9ff3..0000000000 --- a/src/riak_kv_mapper.erl +++ /dev/null @@ -1,311 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% riak_kv_mapper: Executes map functions on input batches -%% -%% Copyright (c) 2007-2010 Basho Technologies, Inc. All Rights Reserved. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. 
You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- --module(riak_kv_mapper). - --behaviour(gen_fsm). - --include_lib("riak_kv_map_phase.hrl"). --include_lib("riak_kv_js_pools.hrl"). - --define(READ_TIMEOUT, 30000). - -%% API --export([start_link/5]). - -%% States --export([prepare/2, - recv_data/2, - do_map/2]). - -%% gen_fsm callbacks --export([init/1, - handle_event/3, - handle_sync_event/4, - handle_info/3, - terminate/3, - code_change/4]). - --record(state, {id, - cache_ref, - cache_key_base, - vnode, - vm, - qterm, - pending, - reqid, - data=[], - inputs, - phase}). - -start_link(VNode, Id, QTerm, MapInputs, PhasePid) -> - gen_fsm:start_link(?MODULE, [VNode, Id, QTerm, MapInputs, PhasePid], []). - -init([VNode, Id, QTerm0, MapInputs, PhasePid]) -> - erlang:link(PhasePid), - gen_fsm:send_event(PhasePid, {register_mapper, Id, self()}), - QTermFun = xform_link_walk(QTerm0), - {_, _, ReqId} = erlang:now(), - riak_kv_stat:update(mapper_start), - {ok, CacheRef} = riak_kv_mapred_cache:cache_ref(), - CacheKeyBase = generate_cache_key_base(QTermFun(undefined)), - {ok, VM} = reserve_jsvm(QTermFun(undefined)), - %% we need some way to reclaim the JS VM if it is busy doing something - %% when the rest of the MapReduce phase exists (e.g. on timeout) - %% easiest method is simply to link, such that the VM is also killed, - %% which will cause the supervisor to spin up a fresh one - if is_pid(VM) -> erlang:link(VM); - true -> ok %% erlang phases do not use VMs - end, - {ok, prepare, #state{id=Id, vnode=VNode, qterm=QTermFun, inputs=MapInputs, - cache_key_base=CacheKeyBase, reqid=ReqId, phase=PhasePid, - cache_ref=CacheRef, vm=VM}, 0}. - -prepare(timeout, State) -> - case fetch_cached_results(State) of - done -> - {stop, normal, State}; - State1 -> - case fetch_data(State1) of - done -> - {stop, normal, State1}; - NewState -> - {next_state, recv_data, NewState} - end - end. - -recv_data({r, Result, _Idx, ReqId}, #state{reqid=ReqId, data=Data0, - pending=Pending}=State) -> - Data = [Result|Data0], - - %% When we receive all data for the keys we sent out - %% switch to "map mode" and evaluate the map function - case length(Data) == length(Pending) of - false -> - {next_state, recv_data, State#state{data=Data}, ?READ_TIMEOUT}; - true -> - {next_state, do_map, State#state{data=Data}, 0} - end; -recv_data(timeout, #state{phase=Phase, id=Id}=State) -> - riak_kv_phase_proto:mapexec_error(Phase, {error, read_timeout}, Id), - {stop, normal, State#state{data=[], pending=[], inputs=[]}}. 
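init/1 above returns {ok, prepare, State, 0}: the zero timeout is the stock gen_fsm idiom for starting work immediately after init without blocking the caller of start_link, since prepare(timeout, State) fires before any other event can be delivered. Stripped to its skeleton (an illustrative module, not part of riak_kv):

    -module(kickoff_fsm).
    -behaviour(gen_fsm).
    -export([start_link/0]).
    -export([init/1, working/2, handle_event/3, handle_sync_event/4,
             handle_info/3, terminate/3, code_change/4]).

    start_link() -> gen_fsm:start_link(?MODULE, [], []).

    init([]) ->
        {ok, working, [], 0}.          % 0 => working(timeout, ...) runs at once

    working(timeout, State) ->
        %% the first unit of work goes here
        {stop, normal, State}.

    handle_event(_Event, StateName, State) -> {next_state, StateName, State}.
    handle_sync_event(_Event, _From, StateName, State) ->
        {reply, ignored, StateName, State}.
    handle_info(_Info, StateName, State) -> {next_state, StateName, State}.
    terminate(_Reason, _StateName, _State) -> ok.
    code_change(_OldVsn, StateName, State, _Extra) -> {ok, StateName, State}.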
- -do_map(timeout, #state{data=Data, vm=VM, qterm=QTermFun, pending=Pending, - phase=Phase, id=Id, cache_key_base=CacheKeyBase, vnode=VNode, cache_ref=CacheRef}=State) -> - lists:foldl(fun(Obj, WorkingSet) -> - case find_input(obj_bkey(Obj), WorkingSet) of - {none, WorkingSet} -> - WorkingSet; - {Input, WorkingSet1} -> - case QTermFun(Input) of - {error, Error} -> - riak_kv_phase_proto:mapexec_error(Phase, {error, Error}, Id); - QTerm -> - CacheKey = generate_final_cachekey(CacheKeyBase, - Input#riak_kv_map_input.kd), - run_map(VNode, Id, QTerm, - Input#riak_kv_map_input.kd, - Obj, Phase, VM, CacheKey, CacheRef) - end, - WorkingSet1 - end end, Pending, Data), - case fetch_data(State) of - done -> - {stop, normal, State}; - NewState -> - {next_state, recv_data, NewState#state{data=[]}} - end. - -handle_event(_Event, StateName, State) -> - {next_state, StateName, State}. - -handle_sync_event(_Event, _From, StateName, State) -> - {reply, ignored, StateName, State}. - -handle_info(_Info, StateName, State) -> - {next_state, StateName, State}. - -terminate(_Reason, _StateName, #state{vm=VM, id=_Id, vnode=_VNode, reqid=_ReqId, phase=_PhasePid}=_State) -> - release_jsvm(VM), - riak_kv_stat:update(mapper_end), - _Reason. - -code_change(_OldVsn, StateName, State, _Extra) -> - {ok, StateName, State}. - -%% Internal functions -fetch_data(#state{inputs=[]}) -> - done; -fetch_data(#state{inputs=Inputs, reqid=ReqId, - vnode=VNode}=State) -> - {Current, Next} = split(Inputs), - BKeys = [Input#riak_kv_map_input.bkey || Input <- Current], - riak_kv_vnode:mget(VNode, BKeys, ReqId), - State#state{inputs=Next, pending=Current}. - -reserve_jsvm({erlang, _}) -> - {ok, undefined}; -reserve_jsvm({javascript, _}) -> - riak_kv_js_manager:reserve_batch_vm(?JSPOOL_MAP, 10). - -release_jsvm(undefined) -> - ok; -release_jsvm(VM) when is_pid(VM) -> - riak_kv_js_vm:finish_batch(VM). - -obj_bkey({{error, notfound},Bkey}) -> - Bkey; -obj_bkey(Obj) -> - {riak_object:bucket(Obj), riak_object:key(Obj)}. - -find_input(BKey, WorkingSet) -> - find_input(BKey, WorkingSet, WorkingSet). - -find_input(_BKey, [], CompleteSet) -> - {none, CompleteSet}; -find_input(BKey, [#riak_kv_map_input{bkey=BKey}=H|_], CompleteSet) -> - {H, lists:delete(H, CompleteSet)}; -find_input(BKey, [_|T], CompleteSet) -> - find_input(BKey, T, CompleteSet). 
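fetch_data/1 above drains its inputs five at a time (via split/1, defined a little further down) so that each riak_kv_vnode:mget/3 call carries a bounded batch; the length guard avoids the badarg that lists:split/2 raises when the list is shorter than the split point. The same batching loop, with the batch size as a parameter:

    %% Apply F to L in fixed-size batches (cf. fetch_data/split above).
    in_batches(_F, [], _N) -> ok;
    in_batches(F, L, N) when length(L) =< N ->
        F(L);
    in_batches(F, L, N) ->
        {Batch, Rest} = lists:split(N, L),
        F(Batch),
        in_batches(F, Rest, N).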
- -run_map(VNode, Id, {erlang, {map, FunTerm, Arg, _}}, KD, Obj0, Phase, _VM, CacheKey, CacheRef) -> - Obj = case Obj0 of - {{error,notfound},_} -> - {error, notfound}; - _ -> - Obj0 - end, - BKey = obj_bkey(Obj0), - Result = try - case FunTerm of - {qfun, F} -> - {ok, (F)(Obj, KD, Arg)}; - {modfun, M, F} -> - {ok, M:F(Obj, KD, Arg)} - end - catch C:R -> - Reason = {C, R, erlang:get_stacktrace()}, - {error, Reason} - end, - case Result of - {ok, Value} -> - riak_kv_phase_proto:mapexec_result(Phase, VNode, obj_bkey(Obj0), Value, Id), - if - is_list(Value) -> - case CacheKey of - not_cached -> - ok; - _ -> - riak_kv_lru:put(CacheRef, BKey, CacheKey, Value) - end; - true -> - ok - end; - {error, _} -> - riak_kv_phase_proto:mapexec_error(Phase, Result, Id) - end; - -run_map(VNode, Id, {javascript, {map, _FunTerm, _Arg, _}}, KD, {{error, notfound},_}=Obj, Phase, _VM, _CacheKey, _CacheRef) -> - BKey = obj_bkey(Obj), - riak_kv_phase_proto:mapexec_result( - Phase, VNode, BKey, [{not_found, BKey, KD}], Id); -run_map(VNode, Id, {javascript, {map, FunTerm, Arg, _}}, KD, Obj, Phase, VM, CacheKey, CacheRef) -> - BKey = {riak_object:bucket(Obj), riak_object:key(Obj)}, - JSArgs = [riak_object:to_json(Obj), KD, Arg], - JSCall = {map, FunTerm, JSArgs}, - case riak_kv_js_vm:batch_blocking_dispatch(VM, JSCall) of - {ok, Result} -> - riak_kv_phase_proto:mapexec_result(Phase, VNode, obj_bkey(Obj), Result, Id), - if - is_list(Result) -> - case CacheKey of - not_cached -> - ok; - _ -> - riak_kv_lru:put(CacheRef, BKey, CacheKey, Result) - end; - true -> - ok - end; - Error -> - riak_kv_phase_proto:mapexec_error(Phase, Error, Id) - end. - -split(L) when length(L) =< 5 -> - {L, []}; -split(L) -> - lists:split(5, L). - -generate_cache_key_base({erlang, {map, {modfun, Mod, Fun}, Arg, _}}) -> - term_to_binary([Mod, Fun, Arg], [compressed]); -generate_cache_key_base({erlang, _}) -> - not_cached; -generate_cache_key_base({javascript, {map, {jsanon, Source}, Arg, _}}) -> - term_to_binary([Source, Arg], [compressed]); -generate_cache_key_base({javascript, {map, {jsfun, Name}, Arg, _}}) -> - term_to_binary([Name, Arg]). - -generate_final_cachekey(not_cached, _KD) -> - not_cached; -generate_final_cachekey(CacheKey, KD) -> - CacheKey1 = list_to_binary([CacheKey, term_to_binary(KD)]), - mochihex:to_hex(crypto:sha(CacheKey1)). - -fetch_cached_results(#state{cache_key_base=not_cached}=State) -> - State; -fetch_cached_results(#state{vnode=VNode, id=Id, phase=Phase, cache_ref=CacheRef, - cache_key_base=CacheKeyBase, inputs=Inputs}=State) -> - case fetch_cached_results(VNode, Id, Phase, CacheRef, CacheKeyBase, Inputs, []) of - done -> - done; - Remainder -> - State#state{inputs=Remainder} - end. - -fetch_cached_results(_VNode, _Id, _Phase, _CacheRef, _CacheKeyBase, [], []) -> - done; -fetch_cached_results(_VNode, _Id, _Phase, _CacheRef, _CacheKeyBase, [], Accum) -> - Accum; -fetch_cached_results(VNode, Id, Phase, CacheRef, CacheKeyBase, [#riak_kv_map_input{bkey=BKey, kd=KD}=H|T], Accum) -> - CacheKey = generate_final_cachekey(CacheKeyBase, KD), - case riak_kv_lru:fetch(CacheRef, BKey, CacheKey) of - notfound -> - fetch_cached_results(VNode, Id, Phase, CacheRef, CacheKey, T, [H|Accum]); - Result -> - riak_kv_phase_proto:mapexec_result(Phase, VNode, BKey, Result, Id), - fetch_cached_results(VNode, Id, Phase, CacheRef, CacheKey, T, Accum) - end. 
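generate_final_cachekey/2 above derives the LRU key by hashing the phase-wide key base together with the per-input key data, so the same function applied with different KeyData caches separately. crypto:sha/1 and mochihex date this code; on a current OTP the equivalent derivation (illustrative only, not what this patch ships) would be:

    %% crypto:hash/2 supersedes crypto:sha/1; hex-encode without mochihex.
    cache_key(not_cached, _KD) ->
        not_cached;
    cache_key(Base, KD) ->
        Digest = crypto:hash(sha, [Base, term_to_binary(KD)]),
        << <<(integer_to_binary(N, 16))/binary>> || <<N:4>> <= Digest >>.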
- -xform_link_walk({erlang, {link, LB, LT, LAcc}}=QTerm) -> - fun(Input) -> - case Input of - undefined -> - QTerm; - _ -> - case proplists:get_value(linkfun, Input#riak_kv_map_input.bprops) of - undefined -> - {error, missing_linkfun}; - LinkFun -> - {erlang, {map, LinkFun, {LB, LT}, LAcc}} - end - end end; -xform_link_walk(QTerm) -> - fun(_) -> QTerm end. diff --git a/src/riak_kv_mapper_sup.erl b/src/riak_kv_mapper_sup.erl deleted file mode 100644 index 9040afa0c7..0000000000 --- a/src/riak_kv_mapper_sup.erl +++ /dev/null @@ -1,48 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% riak_kv_mapper_sup: Supervisor for starting mapper processes -%% -%% Copyright (c) 2007-2010 Basho Technologies, Inc. All Rights Reserved. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- --module(riak_kv_mapper_sup). - --behaviour(supervisor). - -%% API --export([start_link/0, - new_mapper/5]). - -%% Supervisor callbacks --export([init/1]). - -new_mapper({_, Node}=VNode, Id, QTerm, MapInputs, PhasePid) -> - start_child(Node, [VNode, Id, QTerm, MapInputs, PhasePid]). - -start_link() -> - supervisor:start_link({local, ?MODULE}, ?MODULE, []). - -init([]) -> - SupFlags = {simple_one_for_one, 0, 1}, - Process = {undefined, - {riak_kv_mapper, start_link, []}, - temporary, brutal_kill, worker, dynamic}, - {ok, {SupFlags, [Process]}}. - -%% Internal functions -start_child(Node, Args) -> - supervisor:start_child({?MODULE, Node}, Args). diff --git a/src/riak_kv_mapred_cache.erl b/src/riak_kv_mapred_cache.erl deleted file mode 100644 index 261ba4dbfe..0000000000 --- a/src/riak_kv_mapred_cache.erl +++ /dev/null @@ -1,80 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% riak_kv_mapred_cache: gen_server to manage starting up and ejecting -%% old data from the MapReduce cache -%% -%% Copyright (c) 2007-2010 Basho Technologies, Inc. All Rights Reserved. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- --module(riak_kv_mapred_cache). - --behaviour(gen_server). - -%% API --export([start_link/0, - clear/0, - cache_ref/0, - eject/1]). - -%% gen_server callbacks --export([init/1, handle_call/3, handle_cast/2, handle_info/2, - terminate/2, code_change/3]). - --define(SERVER, ?MODULE). - --record(state, {lru}). 
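riak_kv_mapper_sup above is a simple_one_for_one supervisor, and new_mapper/5 reaches it by registered name on the node that owns the vnode: supervisor:start_child/2 accepts a {RegisteredName, Node} reference, which is what keeps each mapper running next to its data. The call shape (argument values are placeholders):

    %% Start a mapper under the supervisor registered on a remote node.
    {ok, Mapper} =
        supervisor:start_child({riak_kv_mapper_sup, 'riak@host2'},
                               [VNode, Id, QTerm, MapInputs, self()]).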
- -clear() -> - gen_server:call(?SERVER, clear, infinity). - -eject(BKey) -> - gen_server:cast(?SERVER, {eject, BKey}). - -cache_ref() -> - gen_server:call(?SERVER, cache_ref, infinity). - -start_link() -> - gen_server:start_link({local, ?SERVER}, ?MODULE, [], []). - -init([]) -> - CacheSize = app_helper:get_env(riak_kv, map_cache_size, 5000), - {ok, #state{lru=riak_kv_lru:new(CacheSize)}}. - -handle_call(clear, _From, #state{lru=LRU}=State) -> - riak_kv_lru:clear(LRU), - {reply, ok, State}; -handle_call(cache_ref, _From, #state{lru=LRU}=State) -> - {reply, {ok, LRU}, State}; -handle_call(_Request, _From, State) -> - {reply, ignore, State}. - -handle_cast({eject, BKey}, #state{lru=LRU}=State) -> - riak_kv_lru:clear_bkey(LRU, BKey), - {noreply, State}; -handle_cast(_Msg, State) -> - {noreply, State}. - -handle_info(_Info, State) -> - {noreply, State}. - -terminate(_Reason, _State) -> - ok. - -code_change(_OldVsn, State, _Extra) -> - {ok, State}. - -%% Internal functions diff --git a/src/riak_kv_mapred_planner.erl b/src/riak_kv_mapred_planner.erl deleted file mode 100644 index dd2c55d6b7..0000000000 --- a/src/riak_kv_mapred_planner.erl +++ /dev/null @@ -1,75 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% riak_kv_mapred_planner: Plans batched mapreduce processing -%% -%% Copyright (c) 2007-2010 Basho Technologies, Inc. All Rights Reserved. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- - --module(riak_kv_mapred_planner). --author('John Muellerleile '). --author('Kevin Smith '). - --include("riak_kv_map_phase.hrl"). - --export([plan_map/1]). - -plan_map(Inputs) -> - build_claim_list(add_map_inputs(Inputs)). - -%% Internal functions -build_claim_list(InputData) -> - {keys, Keys} = lists:keyfind(keys, 1, InputData), - InputData1 = lists:keydelete(keys, 1, InputData), - F = fun({_, KeysA} , {_, KeysB}) -> length(KeysA) =< length(KeysB) end, - PartList0 = lists:sort(F, InputData1), - claim_keys(PartList0, [], Keys). - -claim_keys([], [], _) -> - exit(exhausted_preflist); -claim_keys(_, ClaimList, []) -> - ClaimList; -claim_keys([], _, _) -> - exit(exhausted_preflist); -claim_keys([H|T], ClaimList, Keys) -> - {P, PKeys} = H, - PKeys1 = lists:filter(fun(PK) -> - lists:member(PK, Keys) - end, PKeys), - case PKeys1 == [] of - true -> - claim_keys(T, ClaimList, Keys); - false -> - NewKeys = lists:subtract(Keys, PKeys1), - claim_keys(T, ClaimList ++ [{P, PKeys1}], NewKeys) - end. - -add_map_inputs(Inputs) -> - add_map_inputs(Inputs, [{keys, []}]). 
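claim_keys/3 in riak_kv_mapred_planner above is a greedy cover: partitions are visited from fewest candidate keys to most, each claims whatever still-unclaimed inputs it can serve, and a partition that can contribute nothing new is skipped. A worked example with hypothetical partitions and keys:

    %% Sorted ascending by key-list length: p1, then p2, then p3.
    %%   [{p1,[k1]}, {p2,[k2,k3]}, {p3,[k1,k2,k3]}]
    %% p1 claims k1; p2 claims k2 and k3; p3 finds nothing unclaimed:
    %%   => [{p1,[k1]}, {p2,[k2,k3]}]
    %% Had any key remained unclaimed after the last partition,
    %% claim_keys would exit with exhausted_preflist.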
-add_map_inputs([], InputData) -> - InputData; -add_map_inputs([#riak_kv_map_input{preflist=PList}=H|T], InputData) -> - {keys, Keys} = lists:keyfind(keys, 1, InputData), - InputData1 = lists:foldl(fun(P, Acc) -> - case lists:keyfind(P, 1, Acc) of - false -> - lists:keystore(P, 1, Acc, {P, [H]}); - {P, PKeys} -> - lists:keystore(P, 1, Acc, {P, PKeys ++ [H]}) - end - end, InputData, PList), - add_map_inputs(T, lists:keystore(keys, 1, InputData1, {keys, Keys ++ [H]})). diff --git a/src/riak_kv_mapred_query.erl b/src/riak_kv_mapred_query.erl deleted file mode 100644 index 676a927113..0000000000 --- a/src/riak_kv_mapred_query.erl +++ /dev/null @@ -1,214 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% riak_mapred_query: driver for mapreduce query -%% -%% Copyright (c) 2007-2010 Basho Technologies, Inc. All Rights Reserved. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- - -%% @doc riak_kv_mapred_query is the driver of a mapreduce query. -%% -%% Map phases are expected to have inputs of the form -%% [{Bucket,Key}] or [{{Bucket,Key},KeyData}] (the first form is -%% equivalent to [{{Bucket,Key},undefined}]) and will execute -%% with locality to each key and must return a list that is valid -%% input to the next phase -%% -%% Reduce phases take any list, but the function must be -%% commutative and associative, and the next phase will block -%% until the reduce phase is entirely done, and the reduce fun -%% must return a list that is valid input to the next phase -%% -%% Valid terms for Query: -%%

    -%%
  • {link, Bucket, Tag, Acc}
  • -%%
  • {map, FunTerm, Arg, Acc}
  • -%%
  • {reduce, FunTerm, Arg, Acc}
  • -%%
-%% where FunTerm is one of: -%%
    -%%
  • {modfun, Mod, Fun} : Mod and Fun both atoms -> -%% Mod:Fun(Object,KeyData,Arg)
  • -%%
  • {qfun, Fun} : Fun is an actual fun -> -%% Fun(Object,KeyData,Arg)
  • -%%
  • {strfun, Fun} : Fun is a string (list or binary) -%% containing the definition of an anonymous -%% Erlang function.
  • -%%
-%% @type mapred_queryterm() = -%% {map, mapred_funterm(), Arg :: term(), -%% Accumulate :: boolean()} | -%% {reduce, mapred_funterm(), Arg :: term(), -%% Accumulate :: boolean()} | -%% {link, Bucket :: riak_object:bucket(), Tag :: term(), -%% Accumulate :: boolean()} -%% @type mapred_funterm() = -%% {modfun, Module :: atom(), Function :: atom()}| -%% {qfun, function()}| -%% {strfun, list() | binary()} -%% @type mapred_result() = [term()] - --module(riak_kv_mapred_query). - --export([start/6]). - -start(Node, Client, ReqId, Query0, ResultTransformer, Timeout) -> - EffectiveTimeout = erlang:trunc(Timeout * 1.1), - case check_query_syntax(Query0) of - {ok, Query} -> - luke:new_flow(Node, Client, ReqId, Query, ResultTransformer, EffectiveTimeout); - {bad_qterm, QTerm} -> - {stop, {bad_qterm, QTerm}} - end. - -check_query_syntax(Query) -> - check_query_syntax(lists:reverse(Query), []). - -check_query_syntax([], Accum) -> - {ok, Accum}; -check_query_syntax([QTerm={QTermType, QueryFun, Misc, Acc}|Rest], Accum) when is_boolean(Acc) -> - PhaseDef = case QTermType of - link -> - {phase_mod(link), phase_behavior(link, QueryFun, Acc), [{erlang, QTerm}]}; - T when T =:= map orelse T=:= reduce -> - case QueryFun of - {modfun, Mod, Fun} when is_atom(Mod), - is_atom(Fun) -> - {phase_mod(T), phase_behavior(T, QueryFun, Acc), [{erlang, QTerm}]}; - {qfun, Fun} when is_function(Fun) -> - {phase_mod(T), phase_behavior(T, QueryFun, Acc), [{erlang, QTerm}]}; - {strfun, FunStr} -> - handle_strfun(FunStr, QTerm); - {jsanon, JS} when is_binary(JS) -> - {phase_mod(T), phase_behavior(T, QueryFun, Acc), [{javascript, QTerm}]}; - {jsanon, {Bucket, Key}} when is_binary(Bucket), - is_binary(Key) -> - case fetch_src(Bucket, Key) of - {ok, JS} -> - {phase_mod(T), phase_behavior(T, QueryFun, Acc), [{javascript, - {T, {jsanon, JS}, Misc, Acc}}]}; - _ -> - {bad_qterm, QTerm} - end; - {jsfun, JS} when is_binary(JS) -> - {phase_mod(T), phase_behavior(T, QueryFun, Acc), [{javascript, QTerm}]}; - _ -> - {bad_qterm, QTerm} - end - end, - case PhaseDef of - {bad_qterm, _} -> - PhaseDef; - _ -> - check_query_syntax(Rest, [PhaseDef|Accum]) - end. - -phase_mod(link) -> - riak_kv_map_phase; -phase_mod(map) -> - riak_kv_map_phase; -phase_mod(reduce) -> - riak_kv_reduce_phase. - -phase_behavior(link, _QueryFun, true) -> - [accumulate]; -phase_behavior(link, _QueryFun, false) -> - []; -phase_behavior(map, _QueryFun, true) -> - [accumulate]; -phase_behavior(map, _QueryFun, false) -> - []; -phase_behavior(reduce, _QueryFun, Accumulate) -> - Behaviors0 = [{converge, 2}], - case Accumulate of - true -> - [accumulate|Behaviors0]; - false -> - Behaviors0 - end. - -fetch_src(Bucket, Key) -> - {ok, Client} = riak:local_client(), - case Client:get(Bucket, Key, 1) of - {ok, Obj} -> - {ok, riak_object:get_value(Obj)}; - _ -> - {error, bad_fetch} - end. - -define_anon_erl(FunStr) when is_binary(FunStr) -> - define_anon_erl(binary_to_list(FunStr)); -define_anon_erl(FunStr) when is_list(FunStr) -> - {ok, Tokens, _} = erl_scan:string(FunStr), - {ok, [Form]} = erl_parse:parse_exprs(Tokens), - {value, Fun, _} = erl_eval:expr(Form, erl_eval:new_bindings()), - Fun. - -handle_strfun(FunStr, QTerm) -> - case application:get_env(riak_kv, allow_strfun) of - {ok, true} -> - handle_strfun1(FunStr, QTerm); - _ -> - {bad_qterm, QTerm} - end. 
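For reference, a query that exercises both the map and reduce branches of check_query_syntax/1 above (a sketch; the qfun is a placeholder, and check_query_syntax/1 is internal to this module):

    Query = [{map, {modfun, riak_kv_mapreduce, map_object_value}, none, false},
             {reduce, {qfun, fun(Values, _Arg) -> [length(Values)] end}, none, true}],
    {ok, [_MapPhase, _ReducePhase]} = check_query_syntax(Query).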
- -handle_strfun1({Bucket, Key}, QTerm) when is_binary(Bucket), is_binary(Key) -> - case fetch_src(Bucket, Key) of - {ok, FunStr} -> - handle_strfun(FunStr, QTerm); - _ -> - {bad_qterm, QTerm} - end; -handle_strfun1(FunStr, QTerm={T, QueryFun, Misc, Acc}) - when is_binary(FunStr); is_list(FunStr) -> - case catch define_anon_erl(FunStr) of - Fun when is_function(Fun, 3) -> - {phase_mod(T), phase_behavior(T, QueryFun, Acc), - [{erlang, {T, {qfun, Fun}, Misc, Acc}}]}; - _ -> - {bad_qterm, QTerm} - end; -handle_strfun1(_, QTerm) -> - {bad_qterm, QTerm}. - - --ifdef(TEST). --include_lib("eunit/include/eunit.hrl"). - -strfun_test() -> - application:set_env(riak_kv, allow_strfun, true), - Query = [{map, {strfun, "fun(_,_,_) -> [] end."}, none, true}], - {ok, [{riak_kv_map_phase, [accumulate], [{erlang, {map, {qfun, Fun}, none, true}}]}]} - = check_query_syntax(Query), - ?assertEqual(true, erlang:is_function(Fun, 3)). - -bad_strfun_test() -> - application:set_env(riak_kv, allow_strfun, true), - Query = [{map, {strfun, "fun(_,_,_) -> [] end"}, none, true}], - {bad_qterm, _} = check_query_syntax(Query). - -unbound_var_strfun_test() -> - application:set_env(riak_kv, allow_strfun, true), - Query = [{map, {strfun, "fun(_,_,_) -> [UnboundVar] end."}, none, true}], - {bad_qterm, _} = check_query_syntax(Query). - -disabled_strfun_test() -> - application:set_env(riak_kv, allow_strfun, false), - Query = [{map, {strfun, "fun(_,_,_) -> [] end."}, none, true}], - {bad_qterm, _} = check_query_syntax(Query). - --endif. diff --git a/src/riak_kv_mapred_term.erl b/src/riak_kv_mapred_term.erl index e6c00b968f..96a1593b53 100644 --- a/src/riak_kv_mapred_term.erl +++ b/src/riak_kv_mapred_term.erl @@ -110,7 +110,7 @@ valid_input_targets(Invalid) -> {error, {"Inputs target tuples must be {B,K} or {{B,K},KeyData}:", Invalid}}. %% Return ok if query are valid, {error, Reason} if not. Not very strong validation -%% done here as riak_kv_mapred_query will check this. +%% done here as endpoints and riak_kv_mrc_pipe will check this. parse_query(Query) when is_list(Query) -> {ok, Query}; parse_query(Invalid) -> diff --git a/src/riak_kv_mapreduce.erl b/src/riak_kv_mapreduce.erl index c9908c0a63..a356c8a4db 100644 --- a/src/riak_kv_mapreduce.erl +++ b/src/riak_kv_mapreduce.erl @@ -58,7 +58,7 @@ %% @spec map_identity(boolean()) -> map_phase_spec() %% @doc Produces a spec for a map phase that simply returns %% each object it's handed. That is: -%% Client:mapred(BucketKeys, [map_identity(true)]). +%% riak_kv_mrc_pipe:mapred(BucketKeys, [map_identity(true)]). %% Would return all of the objects named by BucketKeys. map_identity(Acc) -> {map, {modfun, riak_kv_mapreduce, map_identity}, none, Acc}. @@ -72,7 +72,7 @@ map_identity(RiakObject, _, _) -> [RiakObject]. %% @doc Produces a spec for a map phase that simply returns %% the values of the objects from the input to the phase. %% That is: -%% Client:mapred(BucketKeys, [map_object_value(true)]). +%% riak_kv_mrc_pipe:mapred(BucketKeys, [map_object_value(true)]). %% Would return a list that contains the value of each %% object named by BucketKeys. map_object_value(Acc) -> @@ -227,8 +227,8 @@ reduce_sort(List, _) -> %% The original purpose of this function was to count %% the results of a key-listing. For example: %%``` -%% [KeyCount] = C:mapred(<<"my_bucket">>, -%% [riak_kv_mapreduce:reduce_count_inputs(true)]). +%% [KeyCount] = riak_kv_mrc_pipe:mapred(<<"my_bucket">>, +%% [riak_kv_mapreduce:reduce_count_inputs(true)]). 
%%''' %% KeyCount will contain the number of keys found in "my_bucket". reduce_count_inputs(Acc) -> diff --git a/src/riak_kv_memory_backend.erl b/src/riak_kv_memory_backend.erl index fcd1f5d6b2..102c3ce085 100644 --- a/src/riak_kv_memory_backend.erl +++ b/src/riak_kv_memory_backend.erl @@ -32,6 +32,7 @@ %%
 %%<ul>
 %%<li>`ttl' - The time in seconds that an object should live before being expired.</li>
 %%<li>`max_memory' - The amount of memory in megabytes to limit the backend to.</li>
+%%<li>`test' - When true, allow public access to ETS tables so they can be cleared efficiently.</li>
 %%</ul>
%% @@ -55,15 +56,29 @@ status/1, callback/3]). +%% "Testing" backend API +-export([reset/0]). + -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). +-compile([export_all]). -endif. -define(API_VERSION, 1). --define(CAPABILITIES, [async_fold]). +-define(CAPABILITIES, [async_fold, indexes]). + +%% Macros for working with indexes +-define(DELETE_PTN(B,K), {{B,'_','_',K},'_'}). --record(state, {data_ref :: integer() | atom(), - time_ref :: integer() | atom(), +%% ETS table name macros so we can break encapsulation for testing +%% mode +-define(DNAME(P), list_to_atom("riak_kv_"++integer_to_list(P))). +-define(INAME(P), list_to_atom("riak_kv_"++integer_to_list(P)++"_i")). +-define(TNAME(P), list_to_atom("riak_kv_"++integer_to_list(P)++"_t")). + +-record(state, {data_ref :: ets:tid(), + index_ref :: ets:tid(), + time_ref :: ets:tid(), max_memory :: undefined | integer(), used_memory=0 :: integer(), ttl :: integer()}). @@ -98,16 +113,24 @@ capabilities(_, _) -> start(Partition, Config) -> TTL = app_helper:get_prop_or_env(ttl, Config, memory_backend), MemoryMB = app_helper:get_prop_or_env(max_memory, Config, memory_backend), + TableOpts = case app_helper:get_prop_or_env(test, Config, memory_backend) of + true -> + [ordered_set, public, named_table]; + _ -> + [ordered_set] + end, case MemoryMB of undefined -> MaxMemory = undefined, TimeRef = undefined; _ -> MaxMemory = MemoryMB * 1024 * 1024, - TimeRef = ets:new(list_to_atom(integer_to_list(Partition)), [ordered_set]) + TimeRef = ets:new(?TNAME(Partition), TableOpts) end, - DataRef = ets:new(list_to_atom(integer_to_list(Partition)), []), + IndexRef = ets:new(?INAME(Partition), TableOpts), + DataRef = ets:new(?DNAME(Partition), TableOpts), {ok, #state{data_ref=DataRef, + index_ref=IndexRef, max_memory=MaxMemory, time_ref=TimeRef, ttl=TTL}}. @@ -115,9 +138,11 @@ start(Partition, Config) -> %% @doc Stop the memory backend -spec stop(state()) -> ok. stop(#state{data_ref=DataRef, + index_ref=IndexRef, max_memory=MaxMemory, time_ref=TimeRef}) -> catch ets:delete(DataRef), + catch ets:delete(IndexRef), case MaxMemory of undefined -> ok; @@ -132,14 +157,27 @@ stop(#state{data_ref=DataRef, {ok, not_found, state()} | {error, term(), state()}. get(Bucket, Key, State=#state{data_ref=DataRef, + index_ref=IndexRef, + used_memory=UsedMemory, + max_memory=MaxMemory, ttl=TTL}) -> case ets:lookup(DataRef, {Bucket, Key}) of [] -> {error, not_found, State}; - [{{Bucket, Key}, {{ts, Timestamp}, Val}}] -> + [{{Bucket, Key}, {{ts, Timestamp}, Val}}=Object] -> case exceeds_ttl(Timestamp, TTL) of true -> - delete(Bucket, Key, undefined, State), - {error, not_found, State}; + %% Because we do not have the IndexSpecs, we must + %% delete the object directly and all index + %% entries blindly using match_delete. + ets:delete(DataRef, {Bucket, Key}), + ets:match_delete(IndexRef, ?DELETE_PTN(Bucket, Key)), + case MaxMemory of + undefined -> + UsedMemory1 = UsedMemory; + _ -> + UsedMemory1 = UsedMemory - object_size(Object) + end, + {error, not_found, State#state{used_memory=UsedMemory1}}; false -> {ok, Val, State} end; @@ -150,55 +188,45 @@ get(Bucket, Key, State=#state{data_ref=DataRef, end. %% @doc Insert an object into the memory backend. -%% NOTE: The memory backend does not currently -%% support secondary indexing and the _IndexSpecs -%% parameter is ignored. -type index_spec() :: {add, Index, SecondaryKey} | {remove, Index, SecondaryKey}. 
-spec put(riak_object:bucket(), riak_object:key(), [index_spec()], binary(), state()) -> - {ok, state()} | - {error, term(), state()}. -put(Bucket, PrimaryKey, _IndexSpecs, Val, State=#state{data_ref=DataRef, - max_memory=MaxMemory, - time_ref=TimeRef, - ttl=TTL, - used_memory=UsedMemory}) -> - Now = now(), + {ok, state()}. +put(Bucket, PrimaryKey, IndexSpecs, Val, State=#state{data_ref=DataRef, + index_ref=IndexRef, + max_memory=MaxMemory, + time_ref=TimeRef, + ttl=TTL, + used_memory=UsedMemory}) -> + Now = os:timestamp(), case TTL of undefined -> Val1 = Val; _ -> Val1 = {{ts, Now}, Val} end, - case do_put(Bucket, PrimaryKey, Val1, DataRef) of - {ok, Size} -> - %% If the memory is capped update timestamp table - %% and check if the memory usage is over the cap. - case MaxMemory of - undefined -> - UsedMemory1 = UsedMemory; - _ -> - time_entry(Bucket, PrimaryKey, Now, TimeRef), - Freed = trim_data_table(MaxMemory, - UsedMemory + Size, - DataRef, - TimeRef, - 0), - UsedMemory1 = UsedMemory + Size - Freed - end, - {ok, State#state{used_memory=UsedMemory1}}; - {error, Reason} -> - {error, Reason, State} - end. + {ok, Size} = do_put(Bucket, PrimaryKey, Val1, IndexSpecs, DataRef, IndexRef), + case MaxMemory of + undefined -> + UsedMemory1 = UsedMemory; + _ -> + time_entry(Bucket, PrimaryKey, Now, TimeRef), + Freed = trim_data_table(MaxMemory, + UsedMemory + Size, + DataRef, + TimeRef, + IndexRef, + 0), + UsedMemory1 = UsedMemory + Size - Freed + end, + {ok, State#state{used_memory=UsedMemory1}}. %% @doc Delete an object from the memory backend -%% NOTE: The memory backend does not currently -%% support secondary indexing and the _IndexSpecs -%% parameter is ignored. -spec delete(riak_object:bucket(), riak_object:key(), [index_spec()], state()) -> {ok, state()}. -delete(Bucket, Key, _IndexSpecs, State=#state{data_ref=DataRef, - time_ref=TimeRef, - used_memory=UsedMemory}) -> +delete(Bucket, Key, IndexSpecs, State=#state{data_ref=DataRef, + index_ref=IndexRef, + time_ref=TimeRef, + used_memory=UsedMemory}) -> case TimeRef of undefined -> UsedMemory1 = UsedMemory; @@ -215,6 +243,7 @@ delete(Bucket, Key, _IndexSpecs, State=#state{data_ref=DataRef, UsedMemory1 = UsedMemory end end, + update_indexes(Bucket, Key, IndexSpecs, IndexRef), ets:delete(DataRef, {Bucket, Key}), {ok, State#state{used_memory=UsedMemory1}}. @@ -243,15 +272,33 @@ fold_buckets(FoldBucketsFun, Acc, Opts, #state{data_ref=DataRef}) -> any(), [{atom(), term()}], state()) -> {ok, term()} | {async, fun()}. -fold_keys(FoldKeysFun, Acc, Opts, #state{data_ref=DataRef}) -> - Bucket = proplists:get_value(bucket, Opts), - FoldFun = fold_keys_fun(FoldKeysFun, Bucket), +fold_keys(FoldKeysFun, Acc, Opts, #state{data_ref=DataRef, + index_ref=IndexRef}) -> + + %% Figure out how we should limit the fold: by bucket, by + %% secondary index, or neither (fold across everything.) + Bucket = lists:keyfind(bucket, 1, Opts), + Index = lists:keyfind(index, 1, Opts), + + %% Multiple limiters may exist. Take the most specific limiter, + %% get an appropriate folder function. 
+ Folder = if + Index /= false -> + FoldFun = fold_keys_fun(FoldKeysFun, Index), + get_index_folder(FoldFun, Acc, Index, DataRef, IndexRef); + Bucket /= false -> + FoldFun = fold_keys_fun(FoldKeysFun, Bucket), + get_folder(FoldFun, Acc, DataRef); + true -> + FoldFun = fold_keys_fun(FoldKeysFun, undefined), + get_folder(FoldFun, Acc, DataRef) + end, + case lists:member(async_fold, Opts) of true -> - {async, get_folder(FoldFun, Acc, DataRef)}; + {async, Folder}; false -> - Acc0 = ets:foldl(FoldFun, Acc, DataRef), - {ok, Acc0} + {ok, Folder()} end. %% @doc Fold over all the objects for one or all buckets. @@ -273,8 +320,10 @@ fold_objects(FoldObjectsFun, Acc, Opts, #state{data_ref=DataRef}) -> %% @doc Delete all objects from this memory backend -spec drop(state()) -> {ok, state()}. drop(State=#state{data_ref=DataRef, + index_ref=IndexRef, time_ref=TimeRef}) -> ets:delete_all_objects(DataRef), + ets:delete_all_objects(IndexRef), case TimeRef of undefined -> ok; @@ -292,14 +341,18 @@ is_empty(#state{data_ref=DataRef}) -> %% @doc Get the status information for this memory backend -spec status(state()) -> [{atom(), term()}]. status(#state{data_ref=DataRef, + index_ref=IndexRef, time_ref=TimeRef}) -> DataStatus = ets:info(DataRef), + IndexStatus = ets:info(IndexRef), case TimeRef of undefined -> - [{data_table_status, DataStatus}]; + [{data_table_status, DataStatus}, + {index_table_status, IndexStatus}]; _ -> TimeStatus = ets:info(TimeRef), [{data_table_status, DataStatus}, + {index_table_status, IndexStatus}, {time_table_status, TimeStatus}] end. @@ -308,6 +361,24 @@ status(#state{data_ref=DataRef, callback(_Ref, _Msg, State) -> {ok, State}. +%% @doc Resets state of all running memory backends on the local +%% node. The `riak_kv' environment variable `memory_backend' must +%% contain the `test' property, set to `true' for this to work. +-spec reset() -> ok | {error, reset_disabled}. +reset() -> + reset(app_helper:get_env(memory_backend, test, app_helper:get_env(riak_kv, test)), app_helper:get_env(riak_kv, storage_backend)). + +reset(true, ?MODULE) -> + {ok, Ring} = riak_core_ring_manager:get_my_ring(), + [ begin + catch ets:delete_all_objects(?DNAME(I)), + catch ets:delete_all_objects(?INAME(I)), + catch ets:delete_all_objects(?TNAME(I)) + end || I <- riak_core_ring:my_indices(Ring) ], + ok; +reset(_, _) -> + {error, reset_disabled}. + %% =================================================================== %% Internal functions %% =================================================================== @@ -333,32 +404,46 @@ fold_buckets_fun(FoldBucketsFun) -> %% Return a function to fold over keys on this backend fold_keys_fun(FoldKeysFun, undefined) -> fun({{Bucket, Key}, _}, Acc) -> - FoldKeysFun(Bucket, Key, Acc) + FoldKeysFun(Bucket, Key, Acc); + (_, Acc) -> + Acc end; -fold_keys_fun(FoldKeysFun, Bucket) -> - fun({{B, Key}, _}, Acc) -> - case B =:= Bucket of - true -> - FoldKeysFun(Bucket, Key, Acc); - false -> - Acc - end +fold_keys_fun(FoldKeysFun, {bucket, FilterBucket}) -> + fun({{Bucket, Key}, _}, Acc) when Bucket == FilterBucket -> + FoldKeysFun(Bucket, Key, Acc); + (_, Acc) -> + Acc + end; +fold_keys_fun(FoldKeysFun, {index, FilterBucket, {eq, <<"$bucket">>, _}}) -> + %% 2I exact match query on special $bucket field... + fold_keys_fun(FoldKeysFun, {bucket, FilterBucket}); +fold_keys_fun(FoldKeysFun, {index, FilterBucket, {range, <<"$key">>, _, _}}) -> + %% 2I range query on special $key field... 
+ fold_keys_fun(FoldKeysFun, {bucket, FilterBucket}); +fold_keys_fun(FoldKeysFun, {index, FilterBucket, {eq, <<"$key">>, _}}) -> + %% 2I eq query on special $key field... + fold_keys_fun(FoldKeysFun, {bucket, FilterBucket}); +fold_keys_fun(FoldKeysFun, {index, _FilterBucket, _Query}) -> + fun({{Bucket, _FilterField, _FilterTerm, Key}, _}, Acc) -> + FoldKeysFun(Bucket, Key, Acc); + (_, Acc) -> + Acc end. + %% @private %% Return a function to fold over keys on this backend fold_objects_fun(FoldObjectsFun, undefined) -> fun({{Bucket, Key}, Value}, Acc) -> - FoldObjectsFun(Bucket, Key, Value, Acc) + FoldObjectsFun(Bucket, Key, Value, Acc); + (_, Acc) -> + Acc end; -fold_objects_fun(FoldObjectsFun, Bucket) -> - fun({{B, Key}, Value}, Acc) -> - case B =:= Bucket of - true -> - FoldObjectsFun(Bucket, Key, Value, Acc); - false -> - Acc - end +fold_objects_fun(FoldObjectsFun, FilterBucket) -> + fun({{Bucket, Key}, Value}, Acc) when Bucket == FilterBucket-> + FoldObjectsFun(Bucket, Key, Value, Acc); + (_, Acc) -> + Acc end. %% @private @@ -368,16 +453,92 @@ get_folder(FoldFun, Acc, DataRef) -> end. %% @private -do_put(Bucket, Key, Val, Ref) -> +get_index_folder(Folder, Acc0, {index, Bucket, {eq, <<"$bucket">>, _}}, DataRef, _) -> + %% For the special $bucket index, turn it into a fold over the + %% data table. + fun() -> + key_range_folder(Folder, Acc0, DataRef, {Bucket, <<>>}, Bucket) + end; +get_index_folder(Folder, Acc0, {index, Bucket, {range, <<"$key">>, Min, Max}}, DataRef, _) -> + %% For the special range lookup on the $key index, turn it into a + %% fold on the data table + fun() -> + key_range_folder(Folder, Acc0, DataRef, {Bucket, Min}, {Bucket, Min, Max}) + end; +get_index_folder(Folder, Acc0, {index, Bucket, {eq, <<"$key">>, Val}}, DataRef, IndexRef) -> + get_index_folder(Folder, Acc0, {index, Bucket, {range, <<"$key">>, Val, Val}}, DataRef, IndexRef); +get_index_folder(Folder, Acc0, {index, Bucket, {eq, Field, Term}}, _, IndexRef) -> + fun() -> + index_range_folder(Folder, Acc0, IndexRef, {Bucket, Field, Term, undefined}, {Bucket, Field, Term, Term}) + end; +get_index_folder(Folder, Acc0, {index, Bucket, {range, Field, Min, Max}}, _, IndexRef) -> + fun() -> + index_range_folder(Folder, Acc0, IndexRef, {Bucket, Field, Min, undefined}, {Bucket, Field, Min, Max}) + end. + + +%% Iterates over a range of keys, for the special $key and $bucket +%% indexes. +%% @private +-spec key_range_folder(function(), term(), ets:tid(), {riak_object:bucket(), riak_object:key()}, binary() | {riak_object:bucket(), term(), term()}) -> term(). +key_range_folder(Folder, Acc0, DataRef, {B,_}=DataKey, B) -> + case ets:lookup(DataRef, DataKey) of + [] -> + key_range_folder(Folder, Acc0, DataRef, ets:next(DataRef, DataKey), B); + [Object] -> + Acc = Folder(Object, Acc0), + key_range_folder(Folder, Acc, DataRef, ets:next(DataRef, DataKey), B) + end; +key_range_folder(Folder, Acc0, DataRef, {B,K}=DataKey, {B, Min, Max}=Query) when K >= Min, K =< Max -> + case ets:lookup(DataRef, DataKey) of + [] -> + key_range_folder(Folder, Acc0, DataRef, ets:next(DataRef, DataKey), Query); + [Object] -> + Acc = Folder(Object, Acc0), + key_range_folder(Folder, Acc, DataRef, ets:next(DataRef, DataKey), Query) + end; +key_range_folder(_Folder, Acc, _DataRef, _DataKey, _Query) -> + Acc. 
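key_range_folder/5 above walks the data table directly with ets:next/2, seeded with a key such as {Bucket, Min} that may not exist in the table; on an ordered_set, ets:next/2 returns the smallest key strictly greater than its argument whether or not the argument is present, which is what makes the synthetic start key safe. The traversal in miniature:

    %% Range scan over an ordered_set: Seed need not be a real key.
    scan(Tab, Key, Max, Acc) when Key =/= '$end_of_table', Key =< Max ->
        case ets:lookup(Tab, Key) of
            [Obj] -> scan(Tab, ets:next(Tab, Key), Max, [Obj | Acc]);
            []    -> scan(Tab, ets:next(Tab, Key), Max, Acc)
        end;
    scan(_Tab, _Key, _Max, Acc) ->
        lists:reverse(Acc).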
+ +%% Iterates over a range of index postings +index_range_folder(Folder, Acc0, IndexRef, {B, I, V, _K}=IndexKey, {B, I, Min, Max}=Query) when V >= Min, V =< Max -> + case ets:lookup(IndexRef, IndexKey) of + [] -> + %% This will happen on the first iteration, where the key + %% does not exist. In all other cases, ETS will give us a + %% real key from next/2. + index_range_folder(Folder, Acc0, IndexRef, ets:next(IndexRef, IndexKey), Query); + [Posting] -> + Acc = Folder(Posting, Acc0), + index_range_folder(Folder, Acc, IndexRef, ets:next(IndexRef, IndexKey), Query) + end; +index_range_folder(_Folder, Acc, _IndexRef, _IndexKey, _Query) -> + Acc. + + +%% @private +do_put(Bucket, Key, Val, IndexSpecs, DataRef, IndexRef) -> Object = {{Bucket, Key}, Val}, - true = ets:insert(Ref, Object), + true = ets:insert(DataRef, Object), + update_indexes(Bucket, Key, IndexSpecs, IndexRef), {ok, object_size(Object)}. %% Check if this timestamp is past the ttl setting. exceeds_ttl(Timestamp, TTL) -> - Diff = (timer:now_diff(now(), Timestamp) / 1000 / 1000), + Diff = (timer:now_diff(os:timestamp(), Timestamp) / 1000 / 1000), Diff > TTL. +update_indexes(_Bucket, _Key, undefined, _IndexRef) -> + ok; +update_indexes(_Bucket, _Key, [], _IndexRef) -> + ok; +update_indexes(Bucket, Key, [{remove, Field, Value}|Rest], IndexRef) -> + true = ets:delete(IndexRef, {Bucket, Field, Value, Key}), + update_indexes(Bucket, Key, Rest, IndexRef); +update_indexes(Bucket, Key, [{add, Field, Value}|Rest], IndexRef) -> + true = ets:insert(IndexRef, {{Bucket, Field, Value, Key}, <<>>}), + update_indexes(Bucket, Key, Rest, IndexRef). + %% @private time_entry(Bucket, Key, Now, TimeRef) -> ets:insert(TimeRef, {Now, {Bucket, Key}}). @@ -385,20 +546,21 @@ time_entry(Bucket, Key, Now, TimeRef) -> %% @private %% @doc Dump some entries if the max memory size has %% been breached. -trim_data_table(MaxMemory, UsedMemory, _, _, Freed) when +trim_data_table(MaxMemory, UsedMemory, _, _, _, Freed) when (UsedMemory - Freed) =< MaxMemory -> Freed; -trim_data_table(MaxMemory, UsedMemory, DataRef, TimeRef, Freed) -> +trim_data_table(MaxMemory, UsedMemory, DataRef, TimeRef, IndexRef, Freed) -> %% Delete the oldest object - OldestSize = delete_oldest(DataRef, TimeRef), + OldestSize = delete_oldest(DataRef, TimeRef, IndexRef), trim_data_table(MaxMemory, UsedMemory, DataRef, TimeRef, + IndexRef, Freed + OldestSize). %% @private -delete_oldest(DataRef, TimeRef) -> +delete_oldest(DataRef, TimeRef, IndexRef) -> OldestTime = ets:first(TimeRef), case OldestTime of '$end_of_table' -> @@ -408,8 +570,10 @@ delete_oldest(DataRef, TimeRef) -> ets:delete(TimeRef, OldestTime), case ets:lookup(DataRef, OldestKey) of [] -> - delete_oldest(DataRef, TimeRef); + delete_oldest(DataRef, TimeRef, IndexRef); [Object] -> + {Bucket, Key} = OldestKey, + ets:match_delete(IndexRef, ?DELETE_PTN(Bucket, Key)), ets:delete(DataRef, OldestKey), object_size(Object) end @@ -482,6 +646,34 @@ max_memory_test_() -> ?_assertEqual({ok, Value2, State2}, get(Bucket, Key2, State2)) ]. 
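update_indexes/4 above stores one posting per index value as {{Bucket, Field, Value, Key}, <<>>}, so all postings for a given bucket and field are contiguous in the ordered_set and an equality or range query becomes exactly the key walk shown in index_range_folder/5. Concretely (hypothetical data):

    %% put with IndexSpecs = [{add, <<"age_int">>, 21}] for key <<"bob">>
    %% in bucket <<"users">> inserts the posting:
    %%   {{<<"users">>, <<"age_int">>, 21, <<"bob">>}, <<>>}
    %% and {eq, <<"age_int">>, 21} starts its walk at
    %%   {<<"users">>, <<"age_int">>, 21, undefined},
    %% which sorts before every real posting because the atom
    %% 'undefined' compares less than any binary key.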
+regression_367_key_range_test_() -> + {ok, State} = start(142, []), + Keys = [begin + Bin = list_to_binary(integer_to_list(I)), + if I < 10 -> + <<"obj0", Bin/binary>>; + true -> <<"obj", Bin/binary>> + end + end || I <- lists:seq(1,30) ], + Bucket = <<"keyrange">>, + Value = <<"foobarbaz">>, + State1 = lists:foldl(fun(Key, IState) -> + {ok, NewState} = put(Bucket, Key, [], Value, IState), + NewState + end, State, Keys), + Folder = fun(_B, K, Acc) -> + Acc ++ [K] + end, + [ + ?_assertEqual({ok, [<<"obj01">>]}, fold_keys(Folder, [], [{index, Bucket, {range, <<"$key">>, <<"obj01">>, <<"obj01">>}}], State1)), + ?_assertEqual({ok, [<<"obj10">>,<<"obj11">>]}, fold_keys(Folder, [], [{index, Bucket, {range, <<"$key">>, <<"obj10">>, <<"obj11">>}}], State1)), + ?_assertEqual({ok, [<<"obj01">>]}, fold_keys(Folder, [], [{index, Bucket, {range, <<"$key">>, <<"obj00">>, <<"obj01">>}}], State1)), + ?_assertEqual({ok, lists:sort(Keys)}, fold_keys(Folder, [], [{index, Bucket, {range, <<"$key">>, <<"obj0">>, <<"obj31">>}}], State1)), + ?_assertEqual({ok, []}, fold_keys(Folder, [], [{index, Bucket, {range, <<"$key">>, <<"obj31">>, <<"obj32">>}}], State1)), + ?_assertEqual({ok, [<<"obj01">>]}, fold_keys(Folder, [], [{index, Bucket, {eq, <<"$key">>, <<"obj01">>}}], State1)), + ?_assertEqual(ok, stop(State1)) + ]. + -ifdef(EQC). eqc_test_() -> @@ -493,8 +685,8 @@ eqc_test_() -> [ {timeout, 60000, [?_assertEqual(true, - backend_eqc:test(?MODULE, true))]} - ]}]}]}. + backend_eqc:test(?MODULE, true))]} + ]}]}]}. setup() -> application:load(sasl), diff --git a/src/riak_kv_mrc_pipe.erl b/src/riak_kv_mrc_pipe.erl index 2fa0d854c3..2805e5c512 100644 --- a/src/riak_kv_mrc_pipe.erl +++ b/src/riak_kv_mrc_pipe.erl @@ -107,10 +107,13 @@ %% TODO: Stolen from old-style MapReduce interface, but is 60s a good idea? -define(DEFAULT_TIMEOUT, 60000). +-define(SINK_SYNC_PERIOD_DEFAULT, 10). + -export([ mapred/2, mapred/3, mapred_stream/1, + mapred_stream/2, send_inputs/2, send_inputs/3, send_inputs_async/2, @@ -118,10 +121,17 @@ collect_outputs/2, collect_outputs/3, group_outputs/2, + mapred_stream_sink/3, + collect_sink/1, + receive_sink/1, + destroy_sink/1, + cleanup_sink/1, + error_exists/1, mapred_plan/1, mapred_plan/2, compile_string/1, - compat_fun/1 + compat_fun/1, + sink_sync_period/0 ]). %% NOTE: Example functions are used by EUnit tests -export([example/0, example_bucket/0, example_reduce/0, @@ -129,6 +139,7 @@ -include_lib("riak_pipe/include/riak_pipe.hrl"). -include_lib("riak_pipe/include/riak_pipe_log.hrl"). +-include("riak_kv_mrc_sink.hrl"). -export_type([map_query_fun/0, reduce_query_fun/0]). @@ -180,7 +191,7 @@ | {jsanon, Source :: binary()}. -type link_match() :: binary() | '_'. -%% The output of collect_outputs/2,3 and group_outputs/2 +%% The output of collect_outputs/2,3, group_outputs/2, and collect_sink/1 -type ungrouped_results() :: [{From :: non_neg_integer(), Result :: term()}]. -type grouped_results() :: [Results :: list()] | list(). @@ -197,15 +208,26 @@ mapred(Inputs, Query) -> |{error, Reason :: term(), {ok, grouped_results()} | {error, Reason :: term()}}. 
mapred(Inputs, Query, Timeout) -> - {{ok, Pipe}, NumKeeps} = mapred_stream(Query), - case send_inputs(Pipe, Inputs, Timeout) of - ok -> - collect_outputs(Pipe, NumKeeps, Timeout); + case mapred_stream_sink(Inputs, Query, Timeout) of + {ok, Ctx} -> + case collect_sink(Ctx) of + {ok, _}=Success -> + cleanup_sink(Ctx), + Success; + {error, _}=Error -> + destroy_sink(Ctx), + Error + end; Error -> - riak_pipe:eoi(Pipe), - {error, Error, collect_outputs(Pipe, NumKeeps, Timeout)} + {error, Error} end. +%% @equiv mapred_stream(Query, []) +-spec mapred_stream([query_part()]) -> + {{ok, riak_pipe:pipe()}, NumKeeps :: integer()}. +mapred_stream(Query) -> + mapred_stream(Query, []). + %% @doc Setup the MapReduce plumbing, preparted to receive inputs. %% The caller should then use {@link send_inputs/2} or {@link %% send_inputs/3} to give the query inputs to process. @@ -214,13 +236,51 @@ mapred(Inputs, Query, Timeout) -> %% requested to keep their inputs, and will need to be passed to %% {@link collect_outputs/3} or {@link group_outputs/2} to get labels %% compatible with HTTP and PB interface results. --spec mapred_stream([query_part()]) -> +-spec mapred_stream([query_part()], list()) -> {{ok, riak_pipe:pipe()}, NumKeeps :: integer()}. -mapred_stream(Query) -> +mapred_stream(Query, Options) when is_list(Options) -> NumKeeps = count_keeps_in_query(Query), - {riak_pipe:exec(mr2pipe_phases(Query), [{log, sink},{trace,[error]}]), + {riak_pipe:exec(mr2pipe_phases(Query), + [{log, sink},{trace,[error]}]++Options), NumKeeps}. +%% @doc Setup the MapReduce plumbing, including separate process to +%% receive output (the sink) and send input (the async sender), and a +%% delayed `pipe_timeout' message. This call returns a context record +%% containing details for each piece. Monitors are setup in the +%% process that calls this function, watching the sink and sender. +%% +%% See {@link receive_sink/1} for details about how to use this +%% context. +-spec mapred_stream_sink(input(), [query_part()], timeout()) -> + {ok, #mrc_ctx{}} | {error, term()}. +mapred_stream_sink(Inputs, Query, Timeout) -> + {ok, Sink} = riak_kv_mrc_sink:start(self(), []), + Options = [{sink, #fitting{pid=Sink}}, + {sink_type, {fsm, sink_sync_period(), infinity}}], + try mapred_stream(Query, Options) of + {{ok, Pipe}, NumKeeps} -> + %% catch just in case the pipe or sink has already died + %% for any reason - we'll get a DOWN from the monitor later + catch riak_kv_mrc_sink:use_pipe(Sink, Pipe), + SinkMon = erlang:monitor(process, Sink), + PipeRef = (Pipe#pipe.sink)#fitting.ref, + Timer = erlang:send_after(Timeout, self(), + {pipe_timeout, PipeRef}), + {Sender, SenderMon} = + riak_kv_mrc_pipe:send_inputs_async(Pipe, Inputs), + {ok, #mrc_ctx{ref=PipeRef, + pipe=Pipe, + sink={Sink,SinkMon}, + sender={Sender,SenderMon}, + timer={Timer,PipeRef}, + keeps=NumKeeps}} + catch throw:{badard, Fitting, Reason} -> + riak_kv_mrc_sink:stop(Sink), + {error, {Fitting, Reason}} + end. + + %% The plan functions are useful for seeing equivalent (we hope) pipeline. %% @doc Produce the pipe spec that will implement the given MapReduce @@ -512,10 +572,14 @@ send_inputs(Pipe, Inputs) -> -spec send_inputs(riak_pipe:pipe(), input(), timeout()) -> ok | term(). 
send_inputs(Pipe, BucketKeyList, _Timeout) when is_list(BucketKeyList) -> - [riak_pipe:queue_work(Pipe, BKey) - || BKey <- BucketKeyList], - riak_pipe:eoi(Pipe), - ok; + try [ok = riak_pipe:queue_work(Pipe, BKey) + || BKey <- BucketKeyList] of + _ -> + riak_pipe:eoi(Pipe), + ok + catch error:{badmatch,{error,_}=Error} -> + Error + end; send_inputs(Pipe, Bucket, Timeout) when is_binary(Bucket) -> riak_kv_pipe_listkeys:queue_existing_pipe(Pipe, Bucket, Timeout); send_inputs(Pipe, {Bucket, FilterExprs}, Timeout) -> @@ -528,7 +592,7 @@ send_inputs(Pipe, {Bucket, FilterExprs}, Timeout) -> end; send_inputs(Pipe, {index, Bucket, Index, Key}, Timeout) -> Query = {eq, Index, Key}, - case app_helper:get_env(riak_kv, mapred_2i_pipe, false) of + case riak_core_capability:get({riak_kv, mapred_2i_pipe}, false) of true -> riak_kv_pipe_index:queue_existing_pipe( Pipe, Bucket, Query, Timeout); @@ -540,7 +604,7 @@ send_inputs(Pipe, {index, Bucket, Index, Key}, Timeout) -> end; send_inputs(Pipe, {index, Bucket, Index, StartKey, EndKey}, Timeout) -> Query = {range, Index, StartKey, EndKey}, - case app_helper:get_env(riak_kv, mapred_2i_pipe, false) of + case riak_core_capability:get({riak_kv, mapred_2i_pipe}, false) of true -> riak_kv_pipe_index:queue_existing_pipe( Pipe, Bucket, Query, Timeout); @@ -576,9 +640,13 @@ send_key_list(Pipe, Bucket, ReqId) -> receive {ReqId, {keys, Keys}} -> %% Get results from list keys operation. - [riak_pipe:queue_work(Pipe, {Bucket, Key}) - || Key <- Keys], - send_key_list(Pipe, Bucket, ReqId); + try [ok = riak_pipe:queue_work(Pipe, {Bucket, Key}) + || Key <- Keys] of + _ -> + send_key_list(Pipe, Bucket, ReqId) + catch error:{badmatch,{error,_}=Error} -> + Error + end; {ReqId, {results, Results}} -> %% Get results from 2i operation. Handle both [Keys] and [{Key, @@ -589,8 +657,12 @@ send_key_list(Pipe, Bucket, ReqId) -> (Key) -> riak_pipe:queue_work(Pipe, {Bucket, Key}) end, - [F(X) || X <- Results], - send_key_list(Pipe, Bucket, ReqId); + try [ok = F(X) || X <- Results] of + _ -> + send_key_list(Pipe, Bucket, ReqId) + catch error:{badmatch,{error,_}=Error} -> + Error + end; {ReqId, {error, Reason}} -> {error, Reason}; @@ -605,9 +677,10 @@ send_key_list(Pipe, Bucket, ReqId) -> collect_outputs(Pipe, NumKeeps) -> collect_outputs(Pipe, NumKeeps, ?DEFAULT_TIMEOUT). -%% @doc Receive the results produced by the MapReduce pipe, grouped by -%% the phase they came from. See {@link group_outputs/2} for details -%% on that grouping. +%% @doc Receive the results produced by the MapReduce pipe (directly, +%% with no sink process between here and there), grouped by the phase +%% they came from. See {@link group_outputs/2} for details on that +%% grouping. -spec collect_outputs(riak_pipe:pipe(), non_neg_integer(), timeout()) -> {ok, grouped_results()} | {error, {Reason :: term(), Outputs :: ungrouped_results()}}. @@ -622,29 +695,192 @@ collect_outputs(Pipe, NumKeeps, Timeout) -> end. %% @doc Group the outputs of the MapReduce pipe by the phase that -%% produced them. If `NumKeeps' is 2 or more, the return value is a +%% produced them. To be used with {@link collect_outputs/3}. If +%% `NumKeeps' is 2 or more, the return value is a list of result +%% lists, `[Results :: list()]', in the same order as the phases that +%% produced them. If `NumKeeps' is less than 2, the return value is +%% just a list (possibly empty) of results, `Results :: list()'. +-spec group_outputs(ungrouped_results(), non_neg_integer()) -> + grouped_results(). 
+group_outputs(Outputs, NumKeeps) when NumKeeps < 2 -> % 0 or 1 + %% this path trusts that outputs are from only one phase; + %% if NumKeeps lies, all phases will be grouped together; + %% this is much faster than using dict:append/3 for a single key + %% when length(Outputs) is large + [ O || {_, O} <- Outputs ]; +group_outputs(Outputs, _NumKeeps) -> + Group = fun({I,O}, Acc) -> + %% it is assumed that the number of phases + %% producing outputs is small, so a linear search + %% through phases we've seen is not too taxing + case lists:keytake(I, 1, Acc) of + {value, {I, IAcc}, RAcc} -> + [{I,[O|IAcc]}|RAcc]; + false -> + [{I,[O]}|Acc] + end + end, + Merged = lists:foldl(Group, [], Outputs), + [ lists:reverse(O) || {_, O} <- lists:keysort(1, Merged) ]. + +%% @doc Receive the results produced by the MapReduce pipe, via the +%% sink started in {@link mapred_stream_sink/3}, grouped by the phase +%% they came from. If `NumKeeps' is 2 or more, the return value is a %% list of result lists, `[Results :: list()]', in the same order as %% the phases that produced them. If `NumKeeps' is less than 2, the %% return value is just a list (possibly empty) of results, `Results %% :: list()'. --spec group_outputs(ungrouped_results(), non_neg_integer()) -> - grouped_results(). -group_outputs(Outputs, NumKeeps) -> - Merged = lists:foldl(fun({I,O}, Acc) -> - dict:append(I, O, Acc) - end, - dict:new(), - Outputs), - if NumKeeps < 2 -> % 0 or 1 - case dict:to_list(Merged) of - [{_, O}] -> - O; - [] -> - %% an MR query is not required to produce output - [] +-spec collect_sink(#mrc_ctx{}) -> + {ok, grouped_results()} + | {error, {Reason :: term(), Outputs :: ungrouped_results()}}. +collect_sink(#mrc_ctx{keeps=NumKeeps}=Ctx) -> + case collect_sink_loop(Ctx, []) of + {ok, Outputs} -> + {ok, remove_fitting_names(Outputs, NumKeeps)}; + {error, Reason, _}-> + {error, Reason} + end. + +%% collect everything the pipe has to offer +collect_sink_loop(Ctx, Acc) -> + case receive_sink(Ctx) of + {ok, false, Output} -> + collect_sink_loop(Ctx, [Output|Acc]); + {ok, true, Output} -> + {ok, riak_kv_mrc_sink:merge_outputs([Output|Acc])}; + {error, Reason, Outputs} -> + {error, Reason, Outputs} + end. + +%% @doc Receive any output generated by the system set up in {@link +%% mapred_stream_sink/3}. This will include any of the following: +%% +%%
+%% <ul>
+%%   <li>`#kv_mrc_sink{}'</li>
+%%   <li>`DOWN' for `#mrc_ctx.sender' (the async sender)</li>
+%%   <li>`DOWN' for `#mrc_ctx.sink'</li>
+%%   <li>`{pipe_timeout, #mrc_ctx.ref}'</li>
+%% </ul>
+%%
+%% An `{ok, Done::boolean(), Results::orddict()}' tuple is returned if
+%% a `#kv_mrc_sink{}' message is received with no error logs. An
+%% `{error, Reason::term(), PartialResults::orddict()}' tuple is
+%% returned if any of the following are received: `#kv_mrc_sink{}'
+%% message with an error log, a `DOWN' for the async sender with
+%% non-`normal' reason, a `DOWN' for the sink, or the `pipe_timeout'.
+%%
+%% Note that this function calls {@link riak_kv_mrc_sink:next/1}, so
+%% your code should not also call it.
+-spec receive_sink(#mrc_ctx{}) ->
+         {ok, Done::boolean(), Results::grouped_results()}
+       | {error, Reason::term(), PartialResults::grouped_results()}.
+receive_sink(#mrc_ctx{sink={Sink,_}}=Ctx) ->
+    %% the sender-DOWN-normal case loops to ignore that message, but
+    %% we only want to send our next-request once
+    riak_kv_mrc_sink:next(Sink),
+    receive_sink_helper(Ctx).
+
+receive_sink_helper(#mrc_ctx{ref=PipeRef,
+                             sink={Sink, SinkMon},
+                             sender={Sender, SenderMon}}=Ctx) ->
+    receive
+        #kv_mrc_sink{ref=PipeRef, results=Results, logs=Logs, done=Done} ->
+            case error_exists(Logs) of
+                {true, From, Info} ->
+                    {error, {From, Info}, Results};
+                false ->
+                    {ok, Done, Results}
+            end;
+        {'DOWN', SenderMon, process, Sender, normal} ->
+            %% sender dying normal just means it finished
+            receive_sink_helper(Ctx);
+        {'DOWN', SenderMon, process, Sender, Reason} ->
+            {error, {sender_died, Reason}, []};
+        {'DOWN', SinkMon, process, Sink, Reason} ->
+            {error, {sink_died, Reason}, []};
+        {pipe_timeout, PipeRef} ->
+            {error, timeout, []}
+    end.
+
+%% MR is supposed to return just a list of results if there was only
+%% one phase being "kept", but a list of result lists (one per phase)
+%% if multiple phases were kept.
+remove_fitting_names([{_,Outputs}], NumKeeps) when NumKeeps < 2 ->
+    Outputs;
+remove_fitting_names(Outputs, _NumKeeps) ->
+    [O || {_, O} <- Outputs].
+
+%% @doc Destroy the pipe, and call {@link cleanup_sink/1}.
+-spec destroy_sink(#mrc_ctx{}) -> ok.
+destroy_sink(#mrc_ctx{pipe=Pipe}=Ctx) ->
+    riak_pipe:destroy(Pipe),
+    cleanup_sink(Ctx).
+
+%% @doc Tear down the async sender, sink, and timer pieces set up by
+%% {@link mapred_stream_sink/3}, and collect any messages they might
+%% have been delivering.
+-spec cleanup_sink(#mrc_ctx{}) -> ok.
+cleanup_sink(#mrc_ctx{sender=Sender, sink=Sink, timer=Timer}) ->
+    cleanup_sender(Sender),
+    cleanup_sink(Sink),
+    cleanup_timer(Timer);
+cleanup_sink({SinkPid, SinkMon}) when is_pid(SinkPid),
+                                      is_reference(SinkMon) ->
+    erlang:demonitor(SinkMon, [flush]),
+    %% killing the sink should tear down the pipe
+    riak_kv_mrc_sink:stop(SinkPid),
+    %% receive just in case the sink had sent us one last response
+    receive #kv_mrc_sink{} -> ok after 0 -> ok end;
+cleanup_sink(undefined) ->
+    ok.
+
+%% Destroying the pipe via riak_pipe_builder:destroy/1 does not kill
+%% the sender immediately, because it causes the builder to exit with
+%% reason `normal', so no exit signal is sent. The sender will
+%% eventually receive `worker_startup_error's from vnodes that can no
+%% longer find the fittings, but to help the process along, we kill
+%% it immediately here.
+cleanup_sender(#mrc_ctx{sender=Sender}) ->
+    cleanup_sender(Sender);
+cleanup_sender({SenderPid, SenderMon}) when is_pid(SenderPid),
+                                            is_reference(SenderMon) ->
+    erlang:demonitor(SenderMon, [flush]),
+    exit(SenderPid, kill),
+    ok;
+cleanup_sender(undefined) ->
+    ok.
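
Taken together, mapred_stream_sink/3, receive_sink/1, and these cleanup helpers form the lifecycle a streaming consumer is expected to follow; collect_sink/1 above is the batch version of the same loop. A minimal caller-side sketch, assuming it runs in the process that called mapred_stream_sink/3 so the monitors and the pipe_timeout message arrive in the right mailbox; consume/3, stream_loop/1, and handle_batch/1 are illustrative names, not part of this patch:

    consume(Inputs, Query, Timeout) ->
        case riak_kv_mrc_pipe:mapred_stream_sink(Inputs, Query, Timeout) of
            {ok, Ctx} ->
                stream_loop(Ctx);
            {error, _}=Error ->
                Error
        end.

    stream_loop(Ctx) ->
        %% receive_sink/1 issues riak_kv_mrc_sink:next/1 itself
        case riak_kv_mrc_pipe:receive_sink(Ctx) of
            {ok, false, Batch} ->
                handle_batch(Batch),                 %% application's own callback
                stream_loop(Ctx);
            {ok, true, Batch} ->
                handle_batch(Batch),
                riak_kv_mrc_pipe:cleanup_sink(Ctx);  %% normal teardown
            {error, Reason, _Partial} ->
                riak_kv_mrc_pipe:destroy_sink(Ctx),  %% also destroys the pipe
                {error, Reason}
        end.
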
+
+%% don't let timer messages leak
+cleanup_timer(#mrc_ctx{timer=Timer}) ->
+    cleanup_timer(Timer);
+cleanup_timer({Tref, PipeRef}) when is_reference(Tref),
+                                    is_reference(PipeRef) ->
+    case erlang:cancel_timer(Tref) of
+        false ->
+            receive
+                {pipe_timeout, PipeRef} ->
+                    ok
+            after 0 ->
+                    ok
+            end;
+        _ ->
+            ok
+    end;
+cleanup_timer(undefined) ->
+    ok.
+
+%% @doc Look through the logs the pipe produced, and determine if any
+%% of them signal an error. Return the details about the first error
+%% found.
+%%
+%% Each log should be of the form: `{#pipe_log.from, #pipe_log.msg}'
+-spec error_exists(list()) -> {true, term(), term()} | false.
+error_exists(Logs) ->
+    case [ {F, I} || {F, {trace, [error], {error, I}}} <- Logs ] of
+        [{From, Info}|_] ->
+            {true, From, Info};
+        [] ->
+            false
+    end.
 
 %% @doc Produce an Erlang term from a string containing Erlang code.
@@ -665,6 +901,16 @@ compile_string(String) when is_list(String) ->
             {Type, Error}
     end.
 
+%% choose the sink sync period, given app env or default
+-spec sink_sync_period() -> integer() | infinity.
+sink_sync_period() ->
+    case application:get_env(riak_kv, mrc_sink_sync_period) of
+        {ok, Size} when is_integer(Size); Size == infinity ->
+            Size;
+        _ ->
+            ?SINK_SYNC_PERIOD_DEFAULT
+    end.
+
 %%%
 
 %% @doc Use a MapReduce query to get the value of the `foo/bar'
diff --git a/src/riak_kv_mrc_sink.erl b/src/riak_kv_mrc_sink.erl
new file mode 100644
index 0000000000..8a627f6411
--- /dev/null
+++ b/src/riak_kv_mrc_sink.erl
@@ -0,0 +1,434 @@
+%% -------------------------------------------------------------------
+%%
+%% riak_kv_mrc_sink: A simple process to act as a Pipe sink for
+%% MapReduce queries
+%%
+%% Copyright (c) 2012 Basho Technologies, Inc. All Rights Reserved.
+%%
+%% This file is provided to you under the Apache License,
+%% Version 2.0 (the "License"); you may not use this file
+%% except in compliance with the License. You may obtain
+%% a copy of the License at
+%%
+%%   http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing,
+%% software distributed under the License is distributed on an
+%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+%% KIND, either express or implied. See the License for the
+%% specific language governing permissions and limitations
+%% under the License.
+%%
+%% -------------------------------------------------------------------
+
+%% @doc This FSM acts as a Riak Pipe sink, and dumbly accumulates
+%% messages received from the pipe, until it is asked to send them to
+%% its owner. The owner is whatever process started this FSM.
+
+%% This FSM will speak both `raw' and `fsm' sink types (it
+%% answers appropriately to each, without parameterization).
+
+%% The FSM enforces a soft cap on the number of results and logs
+%% accumulated when receiving `fsm' sink type messages. When the
+%% number of results+logs that have been delivered exceeds the cap
+%% between calls to {@link next/1}, the sink stops delivering result
+%% acks to workers. The value of this cap can be specified by
+%% including a `buffer' property in the `Options' parameter of {@link
+%% start/2}, or by setting the `mrc_sink_buffer' environment variable
+%% in the `riak_kv' application. If neither setting is specified, or
+%% they are not specified as non-negative integers, the default
+%% (currently 1000) is used.

+%% Messages are delivered to the owner as an erlang message that is a
+%% `#kv_mrc_sink{}' record. The `logs' field is a list of log messages
+%% received, ordered oldest to youngest, each having the form
+%% `{PhaseId, Message}'. The `results' field is an orddict keyed by
+%% `PhaseId', with each value being a list of results received from
+%% that phase, ordered oldest to youngest. The `ref' field is the
+%% reference from the `#pipe{}' record. The `done' field is `true' if
+%% the `eoi' message has been received, or `false' otherwise.

+%% There should be three states: `which_pipe', `collect_output', and
+%% `send_output'.

+%% The FSM starts in `which_pipe', and waits there until it
+%% is told which pipe to expect output from.

+%% From `which_pipe', the FSM moves to `collect_output'. While in
+%% `collect_output', the FSM simply collects `#pipe_log{}',
+%% `#pipe_result{}', and `#pipe_eoi{}' messages.

+%% If the FSM has received logs, results, or the eoi before it
+%% receives a `next' event, it sends everything it has accumulated to
+%% the owner, wrapped in a `#kv_mrc_sink{}' record, clears its buffers,
+%% and returns to collecting pipe messages.

+%% If the FSM has not received any logs, results, or the eoi before it
+%% receives a `next' event, it enters the `send_output' state. As soon
+%% as the FSM receives any log, result, or eoi message in the
+%% `send_output' state, it sends that message to the owner process,
+%% and then returns to the `collect_output' state.

+%% The FSM only exits on its own in three cases. The first is when its
+%% owner exits. The second is when the builder of the pipe for which
+%% it is consuming messages exits abnormally. The third is after it
+%% delivers a `#kv_mrc_sink{}' in which it has marked
+%% `done=true'.
+-module(riak_kv_mrc_sink).
+
+-export([
+         start/2,
+         start_link/2,
+         use_pipe/2,
+         next/1,
+         stop/1,
+         merge_outputs/1,
+         init/1,
+         which_pipe/2, which_pipe/3,
+         collect_output/2, collect_output/3,
+         send_output/2, send_output/3,
+         handle_event/3,
+         handle_sync_event/4,
+         handle_info/3,
+         terminate/3,
+         code_change/4
+        ]).
+
+-behaviour(gen_fsm).
+
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+-endif.
+
+-include_lib("riak_pipe/include/riak_pipe.hrl").
+-include("riak_kv_mrc_sink.hrl").
+
+-define(BUFFER_SIZE_DEFAULT, 1000).
+
+-record(state, {
+          owner :: pid(),
+          builder :: pid(),
+          ref :: reference(),
+          results=[] :: [{PhaseId::term(), Results::list()}],
+          delayed_acks=[] :: list(),
+          logs=[] :: list(),
+          done=false :: boolean(),
+          buffer_max :: integer(),
+          buffer_left :: integer()
+         }).
+
+start(OwnerPid, Options) ->
+    riak_kv_mrc_sink_sup:start_sink(OwnerPid, Options).
+
+start_link(OwnerPid, Options) ->
+    gen_fsm:start_link(?MODULE, [OwnerPid, Options], []).
+
+use_pipe(Sink, Pipe) ->
+    gen_fsm:sync_send_event(Sink, {use_pipe, Pipe}).
+
+%% @doc Trigger the send of the next result/log/eoi batch received.
+next(Sink) ->
+    gen_fsm:send_event(Sink, next).
+
+stop(Sink) ->
+    riak_kv_mrc_sink_sup:terminate_sink(Sink).
+
+%% @doc Convenience: If outputs are collected as a list of orddicts,
+%% with the first being the most recently received, merge them into
+%% one orddict.
+%%
+%% That is, for one keep, our input should look like:
+%%    [ [{0, [G,H,I]}], [{0, [D,E,F]}], [{0, [A,B,C]}] ]
+%% And we want it to come out as:
+%%    [{0, [A,B,C,D,E,F,G,H,I]}]
+-spec merge_outputs([ [{integer(), list()}] ]) -> [{integer(), list()}].
+merge_outputs(Acc) -> + %% each orddict has its outputs in oldest->newest; since we're + %% iterating from newest->oldest overall, we can just tack the + %% next list onto the front of the accumulator + DM = fun(_K, O, A) -> O++A end, + lists:foldl(fun(O, A) -> orddict:merge(DM, O, A) end, [], Acc). + +%% gen_fsm exports + +init([OwnerPid, Options]) -> + erlang:monitor(process, OwnerPid), + Buffer = buffer_size(Options), + {ok, which_pipe, #state{owner=OwnerPid, + buffer_max=Buffer, + buffer_left=Buffer}}. + +%%% which_pipe: waiting to find out what pipe we're listening to + +which_pipe(_, State) -> + {next_state, which_pipe, State}. + +which_pipe({use_pipe, #pipe{builder=Builder, sink=Sink}}, _From, State) -> + erlang:monitor(process, Builder), + {reply, ok, collect_output, + State#state{builder=Builder, ref=Sink#fitting.ref}}; +which_pipe(_, _, State) -> + {next_state, which_pipe, State}. + +%%% collect_output: buffering results and logs until asked for them + +collect_output(next, State) -> + case State#state.done of + true -> + NewState = send_to_owner(State), + {stop, normal, NewState}; + false -> + case has_output(State) of + true -> + NewState = send_to_owner(State), + {next_state, collect_output, NewState}; + false -> + %% nothing to send yet, prepare to send as soon as + %% there is something + {next_state, send_output, State} + end + end; +collect_output(#pipe_result{ref=Ref, from=PhaseId, result=Res}, + #state{ref=Ref, results=Acc}=State) -> + NewAcc = add_result(PhaseId, Res, Acc), + {next_state, collect_output, State#state{results=NewAcc}}; +collect_output(#pipe_log{ref=Ref, from=PhaseId, msg=Msg}, + #state{ref=Ref, logs=Acc}=State) -> + {next_state, collect_output, State#state{logs=[{PhaseId, Msg}|Acc]}}; +collect_output(#pipe_eoi{ref=Ref}, #state{ref=Ref}=State) -> + {next_state, collect_output, State#state{done=true}}; +collect_output(_, State) -> + {next_state, collect_output, State}. + +collect_output(#pipe_result{ref=Ref, from=PhaseId, result=Res}, + From, + #state{ref=Ref, results=Acc}=State) -> + NewAcc = add_result(PhaseId, Res, Acc), + maybe_ack(From, State#state{results=NewAcc}); +collect_output(#pipe_log{ref=Ref, from=PhaseId, msg=Msg}, + From, + #state{ref=Ref, logs=Acc}=State) -> + maybe_ack(From, State#state{logs=[{PhaseId, Msg}|Acc]}); +collect_output(#pipe_eoi{ref=Ref}, _From, #state{ref=Ref}=State) -> + {reply, ok, collect_output, State#state{done=true}}; +collect_output(_, _, State) -> + {next_state, collect_output, State}. + +maybe_ack(_From, #state{buffer_left=Left}=State) when Left > 0 -> + %% there's room for more, tell the worker it can continue + {reply, ok, collect_output, State#state{buffer_left=Left-1}}; +maybe_ack(From, #state{buffer_left=Left, delayed_acks=Delayed}=State) -> + %% there's no more room, hold up the worker + %% not actually necessary to update buffer_left, but it could make + %% for interesting stats + {next_state, collect_output, + State#state{buffer_left=Left-1, delayed_acks=[From|Delayed]}}. 
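
The second maybe_ack/2 clause is the backpressure point: once buffer_left reaches zero, a worker delivering over the `fsm' sink type is parked in its synchronous send until the owner drains the sink. A toy sketch of both sides, mirroring the buffer_size unit test at the end of this module (the 1000 ms timeout is illustrative):

    %% worker side: blocks while the buffer is full, because the reply
    %% is stashed in delayed_acks instead of being sent immediately
    deliver(Sink, Ref, PhaseId, Result) ->
        ok = gen_fsm:sync_send_event(Sink,
                                     #pipe_result{from=PhaseId, ref=Ref,
                                                  result=Result},
                                     1000).

    %% owner side: asking for the next batch flushes the buffer and
    %% releases every delayed ack via gen_fsm:reply/2
    drain(Sink) ->
        riak_kv_mrc_sink:next(Sink).
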
+
+%% send_output: waiting for output to send, after having been asked
+%% for some while there wasn't any
+
+send_output(#pipe_result{ref=Ref, from=PhaseId, result=Res},
+            #state{ref=Ref, results=Acc}=State) ->
+    NewAcc = add_result(PhaseId, Res, Acc),
+    NewState = send_to_owner(State#state{results=NewAcc}),
+    {next_state, collect_output, NewState};
+send_output(#pipe_log{ref=Ref, from=PhaseId, msg=Msg},
+            #state{ref=Ref, logs=Acc}=State) ->
+    NewState = send_to_owner(State#state{logs=[{PhaseId, Msg}|Acc]}),
+    {next_state, collect_output, NewState};
+send_output(#pipe_eoi{ref=Ref}, #state{ref=Ref}=State) ->
+    NewState = send_to_owner(State#state{done=true}),
+    {stop, normal, NewState};
+send_output(_, State) ->
+    {next_state, send_output, State}.
+
+send_output(#pipe_result{ref=Ref, from=PhaseId, result=Res},
+            _From, #state{ref=Ref, results=Acc}=State) ->
+    NewAcc = add_result(PhaseId, Res, Acc),
+    NewState = send_to_owner(State#state{results=NewAcc}),
+    {reply, ok, collect_output, NewState};
+send_output(#pipe_log{ref=Ref, from=PhaseId, msg=Msg},
+            _From, #state{ref=Ref, logs=Acc}=State) ->
+    NewState = send_to_owner(State#state{logs=[{PhaseId, Msg}|Acc]}),
+    {reply, ok, collect_output, NewState};
+send_output(#pipe_eoi{ref=Ref}, _From, #state{ref=Ref}=State) ->
+    NewState = send_to_owner(State#state{done=true}),
+    {stop, normal, ok, NewState};
+send_output(_, _, State) ->
+    {next_state, send_output, State}.
+
+handle_event(_, StateName, State) ->
+    {next_state, StateName, State}.
+
+handle_sync_event(_, _, StateName, State) ->
+    {next_state, StateName, State}.
+
+%% Clusters containing nodes running Riak version 1.2 and previous
+%% will send raw results, regardless of sink type. We can't block
+%% these workers sending raw results, but we can still track these
+%% additions, and block other workers because of them.
+handle_info(#pipe_result{ref=Ref, from=PhaseId, result=Res},
+            StateName,
+            #state{ref=Ref, results=Acc, buffer_left=Left}=State) ->
+    NewAcc = add_result(PhaseId, Res, Acc),
+    info_response(StateName,
+                  State#state{results=NewAcc, buffer_left=Left-1});
+handle_info(#pipe_log{ref=Ref, from=PhaseId, msg=Msg},
+            StateName,
+            #state{ref=Ref, logs=Acc, buffer_left=Left}=State) ->
+    info_response(StateName,
+                  State#state{logs=[{PhaseId, Msg}|Acc],
+                              buffer_left=Left-1});
+handle_info(#pipe_eoi{ref=Ref},
+            StateName, #state{ref=Ref}=State) ->
+    info_response(StateName, State#state{done=true});
+handle_info({'DOWN', _, process, Pid, _Reason}, _,
+            #state{owner=Pid}=State) ->
+    %% exit as soon as the owner dies
+    {stop, normal, State};
+handle_info({'DOWN', _, process, Pid, Reason}, _,
+            #state{builder=Pid}=State) when Reason /= normal ->
+    %% don't stop when the builder exits 'normal', because that's
+    %% probably just the pipe shutting down normally - wait for the
+    %% owner to ask for the last outputs
+    {stop, normal, State};
+handle_info(_, StateName, State) ->
+    {next_state, StateName, State}.
+
+%% continue buffering, unless we've been waiting to reply; stop if we
+%% were waiting to reply and we've received eoi
+info_response(collect_output, State) ->
+    {next_state, collect_output, State};
+info_response(send_output, #state{done=Done}=State) ->
+    NewState = send_to_owner(State),
+    if Done -> {stop, normal, NewState};
+       true -> {next_state, collect_output, NewState}
+    end.
+
+terminate(_, _, _) ->
+    ok.
+
+code_change(_, StateName, State, _) ->
+    {ok, StateName, State}.
+
+%% internal
+
+has_output(#state{results=[], logs=[]}) ->
+    false;
+has_output(_) ->
+    true.
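
send_to_owner/1 (just below) completes the contract: each next/1 request eventually yields exactly one #kv_mrc_sink{} message. A minimal owner-side receive, assuming Ref is the sink fitting's reference taken from the #pipe{} record:

    await_batch(Ref) ->
        receive
            #kv_mrc_sink{ref=Ref, results=Results, logs=Logs, done=true} ->
                {finished, Results, Logs};
            #kv_mrc_sink{ref=Ref, results=Results, logs=Logs, done=false} ->
                %% more output may follow; call riak_kv_mrc_sink:next/1 again
                {more, Results, Logs}
        end.
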
+ +%% also clears buffers +send_to_owner(#state{owner=Owner, ref=Ref, + results=Results, logs=Logs, done=Done, + buffer_max=Max, delayed_acks=Delayed}=State) -> + Owner ! #kv_mrc_sink{ref=Ref, + results=finish_results(Results), + logs=lists:reverse(Logs), + done=Done}, + [ gen_fsm:reply(From, ok) || From <- Delayed ], + State#state{results=[], logs=[], + buffer_left=Max, delayed_acks=[]}. + +%% results are kept as lists in a proplist +add_result(PhaseId, Result, Acc) -> + case lists:keytake(PhaseId, 1, Acc) of + {value, {PhaseId, IAcc}, RAcc} -> + [{PhaseId,[Result|IAcc]}|RAcc]; + false -> + [{PhaseId,[Result]}|Acc] + end. + +%% transform the proplist buffers into orddicts time-ordered +finish_results(Results) -> + [{I, lists:reverse(R)} || {I, R} <- lists:keysort(1, Results)]. + +%% choose buffer size, given Options, app env, default +-spec buffer_size(list()) -> non_neg_integer(). +buffer_size(Options) -> + case buffer_size_options(Options) of + {ok, Size} -> Size; + false -> + case buffer_size_app_env() of + {ok, Size} -> Size; + false -> + ?BUFFER_SIZE_DEFAULT + end + end. + +-spec buffer_size_options(list()) -> non_neg_integer(). +buffer_size_options(Options) -> + case lists:keyfind(buffer, 1, Options) of + {buffer, Size} when is_integer(Size), Size >= 0 -> + {ok, Size}; + _ -> + false + end. + +-spec buffer_size_app_env() -> non_neg_integer(). +buffer_size_app_env() -> + case application:get_env(riak_kv, mrc_sink_buffer) of + {ok, Size} when is_integer(Size), Size >= 0 -> + {ok, Size}; + _ -> + false + end. + +%% TEST + +-ifdef(TEST). + +buffer_size_test_() -> + Tests = [ {"buffer option", 5, [{buffer, 5}], []}, + {"buffer app env", 5, [], [{mrc_sink_buffer, 5}]}, + {"buffer default", ?BUFFER_SIZE_DEFAULT, [], []} ], + {foreach, + fun() -> application:load(riak_kv) end, + fun(_) -> application:unload(riak_kv) end, + [buffer_size_test_helper(Name, Size, Options, AppEnv) + || {Name, Size, Options, AppEnv} <- Tests]}. + +buffer_size_test_helper(Name, Size, Options, AppEnv) -> + {Name, + fun() -> + application:load(riak_kv), + [ application:set_env(riak_kv, K, V) || {K, V} <- AppEnv ], + + %% start up our sink + {ok, Sink} = ?MODULE:start_link(self(), Options), + Ref = make_ref(), + Pipe = #pipe{builder=self(), + sink=#fitting{pid=Sink, ref=Ref}}, + ?MODULE:use_pipe(Sink, Pipe), + + %% fill its buffer + [ ok = gen_fsm:sync_send_event( + Sink, + #pipe_result{from=tester, ref=Ref, result=I}, + 1000) + || I <- lists:seq(1, Size) ], + + %% ensure extra result will block + {'EXIT',{timeout,{gen_fsm,sync_send_event,_}}} = + (catch gen_fsm:sync_send_event( + Sink, + #pipe_result{from=tester, ref=Ref, result=Size+1}, + 1000)), + + %% now drain what's there + ?MODULE:next(Sink), + + %% make sure that all results were received, including + %% blocked one + receive + #kv_mrc_sink{ref=Ref, results=[{tester,R}]} -> + ?assertEqual(Size+1, length(R)) + end, + %% make sure that the delayed ack was received + receive + {GenFsmRef, ok} when is_reference(GenFsmRef) -> + ok + end + end}. + +-endif. diff --git a/src/riak_kv_mrc_sink_sup.erl b/src/riak_kv_mrc_sink_sup.erl new file mode 100644 index 0000000000..155020bafe --- /dev/null +++ b/src/riak_kv_mrc_sink_sup.erl @@ -0,0 +1,83 @@ +%% ------------------------------------------------------------------- +%% +%% Copyright (c) 2011 Basho Technologies, Inc. +%% +%% This file is provided to you under the Apache License, +%% Version 2.0 (the "License"); you may not use this file +%% except in compliance with the License. 
You may obtain +%% a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, +%% software distributed under the License is distributed on an +%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +%% KIND, either express or implied. See the License for the +%% specific language governing permissions and limitations +%% under the License. +%% +%% ------------------------------------------------------------------- + +%% @doc Supervisor for a sink processes used by {@link +%% riak_kv_wm_mapred} and {@link riak_kv_pb_mapred}. +-module(riak_kv_mrc_sink_sup). + +-behaviour(supervisor). + +%% API +-export([start_link/0]). +-export([start_sink/2, + terminate_sink/1]). + +%% Supervisor callbacks +-export([init/1]). + +%%%=================================================================== +%%% API functions +%%%=================================================================== + +%% @doc Start the supervisor. +-spec start_link() -> {ok, pid()} | ignore | {error, term()}. +start_link() -> + supervisor:start_link({local, ?MODULE}, ?MODULE, []). + +%% @doc Start a new worker under the supervisor. +-spec start_sink(pid(), list()) -> {ok, pid()}. +start_sink(Owner, Options) -> + supervisor:start_child(?MODULE, [Owner, Options]). + +%% @doc Stop a worker immediately +-spec terminate_sink(pid()) -> ok | {error, term()}. +terminate_sink(Sink) -> + supervisor:terminate_child(?MODULE, Sink). + +%%%=================================================================== +%%% Supervisor callbacks +%%%=================================================================== + +%% @doc Initialize the supervisor. This is a `simple_one_for_one', +%% whose child spec is for starting `riak_kv_mrc_sink' FSMs. +-spec init([]) -> {ok, {{supervisor:strategy(), + pos_integer(), + pos_integer()}, + [ supervisor:child_spec() ]}}. +init([]) -> + RestartStrategy = simple_one_for_one, + MaxRestarts = 1000, + MaxSecondsBetweenRestarts = 3600, + + SupFlags = {RestartStrategy, MaxRestarts, MaxSecondsBetweenRestarts}, + + Restart = temporary, + Shutdown = 2000, + Type = worker, + + AChild = {undefined, % no registered name + {riak_kv_mrc_sink, start_link, []}, + Restart, Shutdown, Type, [riak_kv_mrc_sink]}, + + {ok, {SupFlags, [AChild]}}. + +%%%=================================================================== +%%% Internal functions +%%%=================================================================== diff --git a/src/riak_kv_multi_backend.erl b/src/riak_kv_multi_backend.erl index db700c10d4..0b6b813004 100644 --- a/src/riak_kv_multi_backend.erl +++ b/src/riak_kv_multi_backend.erl @@ -314,7 +314,12 @@ is_empty(#state{backends=Backends}) -> -spec status(state()) -> [{atom(), term()}]. status(#state{backends=Backends}) -> %% @TODO Reexamine how this is handled - [{N, Mod:status(ModState)} || {N, Mod, ModState} <- Backends]. + %% all backend mods return a proplist from Mod:status/1 + %% So as to tag the backend with its mod, without + %% breaking this API list of two tuples return, + %% add the tuple {mod, Mod} to the status for each + %% backend. + [{N, [{mod, Mod} | Mod:status(ModState)]} || {N, Mod, ModState} <- Backends]. %% @doc Register an asynchronous callback -spec callback(reference(), any(), state()) -> {ok, state()}. 
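
The effect of the {mod, Mod} tagging is easiest to see in the shape of status/1's return value; callers can now attribute each stats proplist to the backend module that produced it. An illustrative (invented) before/after:

    %% before: [{be_default, [{key_count, 42}]}]
    %% after:  [{be_default, [{mod, riak_kv_bitcask_backend},
    %%                        {key_count, 42}]}]
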
@@ -535,10 +540,10 @@ eqc_test_() -> [{setup, fun setup/0, fun cleanup/1, - [?_assertEqual(true, - backend_eqc:test(?MODULE, true, sample_config())), - ?_assertEqual(true, - backend_eqc:test(?MODULE, true, async_fold_config())) + [{timeout, 60000, [?_assertEqual(true, + backend_eqc:test(?MODULE, true, sample_config()))]}, + {timeout, 60000, [?_assertEqual(true, + backend_eqc:test(?MODULE, true, async_fold_config()))]} ]}]}]}. setup() -> @@ -603,6 +608,7 @@ extra_callback_test() -> application:stop(bitcask). bad_config_test() -> + application:unset_env(riak_kv, multi_backend), ErrorReason = multi_backend_config_unset, ?assertEqual({error, ErrorReason}, start(0, [])). diff --git a/src/riak_kv_pb_bucket.erl b/src/riak_kv_pb_bucket.erl new file mode 100644 index 0000000000..1f40c7dfd4 --- /dev/null +++ b/src/riak_kv_pb_bucket.erl @@ -0,0 +1,131 @@ +%% ------------------------------------------------------------------- +%% +%% riak_kv_pb_bucket: Expose KV bucket functionality to Protocol Buffers +%% +%% Copyright (c) 2012 Basho Technologies, Inc. All Rights Reserved. +%% +%% This file is provided to you under the Apache License, +%% Version 2.0 (the "License"); you may not use this file +%% except in compliance with the License. You may obtain +%% a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, +%% software distributed under the License is distributed on an +%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +%% KIND, either express or implied. See the License for the +%% specific language governing permissions and limitations +%% under the License. +%% +%% ------------------------------------------------------------------- + +%% @doc
+%% <p>The Bucket PB service for Riak KV. This covers the
+%% following request messages in the original protocol:</p>
+%%
+%% <pre>
+%% 15 - RpbListBucketsReq
+%% 17 - RpbListKeysReq
+%% 19 - RpbGetBucketReq
+%% 21 - RpbSetBucketReq
+%% </pre>
+%%
+%% <p>This service produces the following responses:</p>
+%%
+%% <pre>
+%% 16 - RpbListBucketsResp
+%% 18 - RpbListKeysResp{1,}
+%% 20 - RpbGetBucketResp
+%% 22 - RpbSetBucketResp
+%% </pre>
+%%
+%% <p>The semantics are unchanged from their original
+%% implementations.</p>
+%% @end + +-module(riak_kv_pb_bucket). + +-include_lib("riak_pb/include/riak_kv_pb.hrl"). + +-behaviour(riak_api_pb_service). + +-export([init/0, + decode/2, + encode/1, + process/2, + process_stream/3]). + +-record(state, {client, % local client + req, % current request (for multi-message requests like list keys) + req_ctx}). % context to go along with request (partial results, request ids etc) + +%% @doc init/0 callback. Returns the service internal start +%% state. +-spec init() -> any(). +init() -> + {ok, C} = riak:local_client(), + #state{client=C}. + +%% @doc decode/2 callback. Decodes an incoming message. +decode(Code, Bin) -> + {ok, riak_pb_codec:decode(Code, Bin)}. + +%% @doc encode/1 callback. Encodes an outgoing response message. +encode(Message) -> + {ok, riak_pb_codec:encode(Message)}. + +%% @doc process/2 callback. Handles an incoming request message. +process(rpblistbucketsreq, + #state{client=C} = State) -> + case C:list_buckets() of + {ok, Buckets} -> + {reply, #rpblistbucketsresp{buckets = Buckets}, State}; + {error, Reason} -> + {error, {format, Reason}, State} + end; + +%% Start streaming in list keys +process(#rpblistkeysreq{bucket=B}=Req, #state{client=C} = State) -> + %% stream_list_keys results will be processed by process_stream/3 + {ok, ReqId} = C:stream_list_keys(B), + {reply, {stream, ReqId}, State#state{req = Req, req_ctx = ReqId}}; + +%% Get bucket properties +process(#rpbgetbucketreq{bucket=B}, + #state{client=C} = State) -> + Props = C:get_bucket(B), + PbProps = riak_pb_kv_codec:encode_bucket_props(Props), + {reply, #rpbgetbucketresp{props = PbProps}, State}; + +%% Set bucket properties +process(#rpbsetbucketreq{bucket=B, props = PbProps}, + #state{client=C} = State) -> + Props = riak_pb_kv_codec:decode_bucket_props(PbProps), + case C:set_bucket(B, Props) of + ok -> + {reply, rpbsetbucketresp, State}; + {error, Details} -> + {error, {format, "Invalid bucket properties: ~p", [Details]}, State} + end. + +%% @doc process_stream/3 callback. Handles streaming keys messages. +process_stream({ReqId, done}, ReqId, + State=#state{req=#rpblistkeysreq{}, req_ctx=ReqId}) -> + {done, #rpblistkeysresp{done = 1}, State}; +process_stream({ReqId, From, {keys, []}}, ReqId, + State=#state{req=#rpblistkeysreq{}, req_ctx=ReqId}) -> + riak_kv_keys_fsm:ack_keys(From), + {ignore, State}; +process_stream({ReqId, {keys, []}}, ReqId, + State=#state{req=#rpblistkeysreq{}, req_ctx=ReqId}) -> + {ignore, State}; +process_stream({ReqId, From, {keys, Keys}}, ReqId, + State=#state{req=#rpblistkeysreq{}, req_ctx=ReqId}) -> + riak_kv_keys_fsm:ack_keys(From), + {reply, #rpblistkeysresp{keys = Keys}, State}; +process_stream({ReqId, {keys, Keys}}, ReqId, + State=#state{req=#rpblistkeysreq{}, req_ctx=ReqId}) -> + {reply, #rpblistkeysresp{keys = Keys}, State}; +process_stream({ReqId, Error}, ReqId, + State=#state{ req=#rpblistkeysreq{}, req_ctx=ReqId}) -> + {error, {format, Error}, State#state{req = undefined, req_ctx = undefined}}. diff --git a/src/riak_kv_pb_index.erl b/src/riak_kv_pb_index.erl new file mode 100644 index 0000000000..7ef3f198e8 --- /dev/null +++ b/src/riak_kv_pb_index.erl @@ -0,0 +1,102 @@ +%% ------------------------------------------------------------------- +%% +%% riak_kv_pb_index: Expose secondary index queries to Protocol Buffers +%% +%% Copyright (c) 2012 Basho Technologies, Inc. All Rights Reserved. +%% +%% This file is provided to you under the Apache License, +%% Version 2.0 (the "License"); you may not use this file +%% except in compliance with the License. 
You may obtain +%% a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, +%% software distributed under the License is distributed on an +%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +%% KIND, either express or implied. See the License for the +%% specific language governing permissions and limitations +%% under the License. +%% +%% ------------------------------------------------------------------- + +%% @doc
+%% <p>The Secondary Index PB service for Riak KV. This covers the
+%% following request messages:</p>
+%%
+%% <pre>
+%%  25 - RpbIndexReq
+%% </pre>
+%%
+%% <p>This service produces the following responses:</p>
+%%
+%% <pre>
+%%  26 - RpbIndexResp
+%% </pre>
+%% @end + +-module(riak_kv_pb_index). + +-include_lib("riak_pb/include/riak_kv_pb.hrl"). + +-behaviour(riak_api_pb_service). + +-export([init/0, + decode/2, + encode/1, + process/2, + process_stream/3]). + +-record(state, {client}). + +%% @doc init/0 callback. Returns the service internal start +%% state. +-spec init() -> any(). +init() -> + {ok, C} = riak:local_client(), + #state{client=C}. + +%% @doc decode/2 callback. Decodes an incoming message. +decode(Code, Bin) -> + {ok, riak_pb_codec:decode(Code, Bin)}. + +%% @doc encode/1 callback. Encodes an outgoing response message. +encode(Message) -> + {ok, riak_pb_codec:encode(Message)}. + +%% @doc process/2 callback. Handles an incoming request message. +process(#rpbindexreq{qtype=eq, key=SKey}, State) + when not is_binary(SKey) -> + {error, {format, "Invalid equality query ~p", [SKey]}, State}; +process(#rpbindexreq{qtype=range, range_min=Min, range_max=Max}, State) + when not (is_binary(Min) andalso is_binary(Max)) -> + {error, {format, "Invalid range query: ~p -> ~p", [Min, Max]}, State}; +process(#rpbindexreq{bucket=Bucket, index=Index, qtype=eq, key=SKey}, #state{client=Client}=State) -> + case riak_index:to_index_query(Index, [SKey]) of + {ok, Query} -> + case Client:get_index(Bucket, Query) of + {ok, Results} -> + {reply, #rpbindexresp{keys=Results}, State}; + {error, QReason} -> + {error, {format, QReason}, State} + end; + {error, Reason} -> + {error, {format, Reason}, State} + end; +process(#rpbindexreq{bucket=Bucket, index=Index, qtype=range, + range_min=Min, range_max=Max}, #state{client=Client}=State) -> + case riak_index:to_index_query(Index, [Min, Max]) of + {ok, Query} -> + case Client:get_index(Bucket, Query) of + {ok, Results} -> + {reply, #rpbindexresp{keys=Results}, State}; + {error, QReason} -> + {error, {format, QReason}, State} + end; + {error, Reason} -> + {error, {format, Reason}, State} + end. + +%% @doc process_stream/3 callback. This service does not create any +%% streaming responses and so ignores all incoming messages. +process_stream(_,_,State) -> + {ignore, State}. diff --git a/src/riak_kv_pb_listener.erl b/src/riak_kv_pb_listener.erl deleted file mode 100644 index 92d267c569..0000000000 --- a/src/riak_kv_pb_listener.erl +++ /dev/null @@ -1,62 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% riak_kv_pb_listener: Listen for protocol buffer clients -%% -%% Copyright (c) 2007-2010 Basho Technologies, Inc. All Rights Reserved. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- - -%% @doc entry point for TCP-based protocol buffers service - --module(riak_kv_pb_listener). --behavior(gen_nb_server). --export([start_link/0]). --export([init/1, handle_call/3, handle_cast/2, handle_info/2, - terminate/2, code_change/3]). --export([sock_opts/0, new_connection/2]). --record(state, {portnum}). 
- -start_link() -> - PortNum = app_helper:get_env(riak_kv, pb_port), - IpAddr = app_helper:get_env(riak_kv, pb_ip), - gen_nb_server:start_link(?MODULE, IpAddr, PortNum, [PortNum]). - -init([PortNum]) -> - {ok, #state{portnum=PortNum}}. - -sock_opts() -> - BackLog = app_helper:get_env(riak_kv, pb_backlog, 5), - NoDelay = app_helper:get_env(riak_kv, disable_pb_nagle, true), - [binary, {packet, 4}, {reuseaddr, true}, {backlog, BackLog}, {nodelay, NoDelay}]. - -handle_call(_Req, _From, State) -> - {reply, not_implemented, State}. - -handle_cast(_Msg, State) -> {noreply, State}. - -handle_info(_Info, State) -> {noreply, State}. - -terminate(_Reason, _State) -> ok. - -code_change(_OldVsn, State, _Extra) -> {ok, State}. - -new_connection(Socket, State) -> - {ok, Pid} = riak_kv_pb_socket_sup:start_socket(), - ok = gen_tcp:controlling_process(Socket, Pid), - ok = riak_kv_pb_socket:set_socket(Pid, Socket), - {ok, State}. - diff --git a/src/riak_kv_pb_mapred.erl b/src/riak_kv_pb_mapred.erl new file mode 100644 index 0000000000..367130e5e6 --- /dev/null +++ b/src/riak_kv_pb_mapred.erl @@ -0,0 +1,222 @@ +%% ------------------------------------------------------------------- +%% +%% riak_kv_pb_mapred: Expose KV MapReduce functionality to Protocol Buffers +%% +%% Copyright (c) 2012 Basho Technologies, Inc. All Rights Reserved. +%% +%% This file is provided to you under the Apache License, +%% Version 2.0 (the "License"); you may not use this file +%% except in compliance with the License. You may obtain +%% a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, +%% software distributed under the License is distributed on an +%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +%% KIND, either express or implied. See the License for the +%% specific language governing permissions and limitations +%% under the License. +%% +%% ------------------------------------------------------------------- + +%% @doc
+%% <p>The PB Service for MapReduce processing in Riak KV. This
+%% covers the following request messages in the original protocol:</p>
+%%
+%% <pre>
+%% 23 - RpbMapRedReq
+%% </pre>
+%%
+%% <p>This service produces the following responses:</p>
+%%
+%% <pre>
+%% 24 - RpbMapRedResp{1,}
+%% </pre>
+%% @end + +-module(riak_kv_pb_mapred). + +-include_lib("riak_pb/include/riak_kv_pb.hrl"). +-include_lib("riak_pipe/include/riak_pipe.hrl"). +-include("riak_kv_mrc_sink.hrl"). + +-behaviour(riak_api_pb_service). + +-export([init/0, + decode/2, + encode/1, + process/2, + process_stream/3]). + +-record(state, {req, + req_ctx}). + +-record(pipe_ctx, {ref, % easier-access ref/reqid + mrc, % #mrc_ctx{} + sender, % {pid(), monitor()} of process sending inputs + sink, % {pid(), monitor()} of process collecting outputs + has_mr_query}). % true if the request contains a query. + +init() -> + #state{}. + +decode(Code, Bin) -> + {ok, riak_pb_codec:decode(Code, Bin)}. + +encode(Message) -> + {ok, riak_pb_codec:encode(Message)}. + +%% Start map/reduce job - results will be processed in handle_info +process(#rpbmapredreq{request=MrReq, content_type=ContentType}=Req, + State) -> + case decode_mapred_query(MrReq, ContentType) of + {error, Reason} -> + {error, {format, Reason}, State}; + {ok, Inputs, Query, Timeout} -> + pipe_mapreduce(Req, State, Inputs, Query, Timeout) + end. + +process_stream(#kv_mrc_sink{ref=ReqId, + results=Results, + logs=Logs, + done=Done}, + ReqId, + State=#state{req=#rpbmapredreq{}, + req_ctx=#pipe_ctx{ref=ReqId, + mrc=Mrc}=PipeCtx}) -> + case riak_kv_mrc_pipe:error_exists(Logs) of + false -> + case msgs_for_results(Results, State) of + {ok, Msgs} -> + if Done -> + cleanup_pipe(PipeCtx), + %% we could set the done=1 flag on the + %% final results message, but that has + %% never been done, so there are probably + %% client libs that aren't expecting it; + %% play it safe for now + {done, + Msgs++[#rpbmapredresp{done=1}], + clear_state_req(State)}; + true -> + {Sink, _} = Mrc#mrc_ctx.sink, + riak_kv_mrc_sink:next(Sink), + {reply, Msgs, State} + end; + {error, Reason} -> + destroy_pipe(PipeCtx), + {error, Reason, clear_state_req(State)} + end; + {true, From, Info} -> + destroy_pipe(PipeCtx), + JsonInfo = {struct, riak_kv_mapred_json:jsonify_pipe_error( + From, Info)}, + {error, + mochijson2:encode(JsonInfo), + clear_state_req(State)} + end; + +process_stream({'DOWN', Ref, process, Pid, Reason}, Ref, + State=#state{req=#rpbmapredreq{}, + req_ctx=#pipe_ctx{sender={Pid, Ref}}=PipeCtx}) -> + %% the async input sender exited + if Reason == normal -> + %% just reached the end of the input sending - all is + %% well, continue processing + NewPipeCtx = PipeCtx#pipe_ctx{sender=undefined}, + {ignore, State#state{req_ctx=NewPipeCtx}}; + true -> + %% something went wrong sending inputs - tell the client + %% about it, and shutdown the pipe + destroy_pipe(PipeCtx), + lager:error("Error sending inputs: ~p", [Reason]), + {error, {format, "Error sending inputs: ~p", [Reason]}, + clear_state_req(State)} + end; +process_stream({'DOWN', Mon, process, Pid, Reason}, _, + State=#state{req=#rpbmapredreq{}, + req_ctx=#pipe_ctx{sink={Pid, Mon}}=PipeCtx}) -> + %% the sink died, which it shouldn't be able to do before + %% delivering our final results + destroy_pipe(PipeCtx), + lager:error("Error receiving outputs: ~p", [Reason]), + {error, + {format, "Error receiving outputs: ~p", [Reason]}, + clear_state_req(State)}; +process_stream({pipe_timeout, Ref}, Ref, + State=#state{req=#rpbmapredreq{}, + req_ctx=#pipe_ctx{ref=Ref}=PipeCtx}) -> + destroy_pipe(PipeCtx), + {error, "timeout", clear_state_req(State)}; + +process_stream(_,_,State) -> % Ignore any late replies from gen_servers/messages from fsms + {ignore, State}. 
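
To make the streaming contract above concrete: every #kv_mrc_sink{} batch becomes one RpbMapRedResp per phase that produced output, and completion is signalled by a trailing response carrying only the done flag, since the code deliberately never sets done=1 on a message that also carries data. A two-phase query therefore streams roughly like this sketch:

    RpbMapRedResp{phase=0, response=...}    %% repeated as batches arrive
    RpbMapRedResp{phase=1, response=...}
    ...
    RpbMapRedResp{done=1}                   %% always last, and data-free
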
+ + +%% =================================================================== +%% Internal functions +%% =================================================================== + +clear_state_req(State) -> + State#state{req=undefined, req_ctx=undefined}. + +destroy_pipe(#pipe_ctx{mrc=Mrc}) -> + riak_kv_mrc_pipe:destroy_sink(Mrc). + +cleanup_pipe(#pipe_ctx{mrc=Mrc}) -> + riak_kv_mrc_pipe:cleanup_sink(Mrc). + +pipe_mapreduce(Req, State, Inputs, Query, Timeout) -> + case riak_kv_mrc_pipe:mapred_stream_sink(Inputs, Query, Timeout) of + {ok, #mrc_ctx{ref=PipeRef, + sink={Sink,SinkMon}, + sender={Sender,SenderMon}}=Mrc} -> + riak_kv_mrc_sink:next(Sink), + %% pulling ref, sink, and sender out to make matches less + %% nested in process callbacks + Ctx = #pipe_ctx{ref=PipeRef, + mrc=Mrc, + sink={Sink,SinkMon}, + sender={Sender,SenderMon}, + has_mr_query = (Query /= [])}, + {reply, {stream, PipeRef}, State#state{req=Req, req_ctx=Ctx}}; + {error, {Fitting, Reason}} -> + {error, {format, "Phase ~p: ~s", [Fitting, Reason]}, State} + end. + +%% Decode a mapred query +%% {ok, ParsedInputs, ParsedQuery, Timeout}; +decode_mapred_query(Query, <<"application/json">>) -> + riak_kv_mapred_json:parse_request(Query); +decode_mapred_query(Query, <<"application/x-erlang-binary">>) -> + riak_kv_mapred_term:parse_request(Query); +decode_mapred_query(_Query, ContentType) -> + {error, {unknown_content_type, ContentType}}. + +%% PB can only return responses for one phase at a time, +%% so we have to build a message for each +msgs_for_results(Results, #state{req=Req, req_ctx=PipeCtx}) -> + msgs_for_results(Results, + Req#rpbmapredreq.content_type, + PipeCtx#pipe_ctx.has_mr_query, + []). + +msgs_for_results([{PhaseId, Results}|Rest], CType, HasMRQuery, Acc) -> + case encode_mapred_phase(Results, CType, HasMRQuery) of + {error, _}=Error -> + Error; + Encoded -> + Msg=#rpbmapredresp{phase=PhaseId, response=Encoded}, + msgs_for_results(Rest, CType, HasMRQuery, [Msg|Acc]) + end; +msgs_for_results([], _, _, Acc) -> + {ok, lists:reverse(Acc)}. + +%% Convert a map/reduce phase to the encoding requested +encode_mapred_phase(Res, <<"application/json">>, HasMRQuery) -> + Res1 = riak_kv_mapred_json:jsonify_bkeys(Res, HasMRQuery), + mochijson2:encode(Res1); +encode_mapred_phase(Res, <<"application/x-erlang-binary">>, _) -> + term_to_binary(Res); +encode_mapred_phase(_Res, ContentType, _) -> + {error, {unknown_content_type, ContentType}}. diff --git a/src/riak_kv_pb_object.erl b/src/riak_kv_pb_object.erl new file mode 100644 index 0000000000..e92b7762a4 --- /dev/null +++ b/src/riak_kv_pb_object.erl @@ -0,0 +1,303 @@ +%% ------------------------------------------------------------------- +%% +%% riak_kv_pb_object: Expose KV functionality to Protocol Buffers +%% +%% Copyright (c) 2012 Basho Technologies, Inc. All Rights Reserved. +%% +%% This file is provided to you under the Apache License, +%% Version 2.0 (the "License"); you may not use this file +%% except in compliance with the License. You may obtain +%% a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, +%% software distributed under the License is distributed on an +%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +%% KIND, either express or implied. See the License for the +%% specific language governing permissions and limitations +%% under the License. +%% +%% ------------------------------------------------------------------- + +%% @doc
+%% <p>The Object/Key PB service for Riak KV. This covers the
+%% following request messages in the original protocol:</p>
+%%
+%% <pre>
+%%  3 - RpbGetClientIdReq
+%%  5 - RpbSetClientIdReq
+%%  9 - RpbGetReq
+%% 11 - RpbPutReq
+%% 13 - RpbDelReq
+%% </pre>
+%%
+%% <p>This service produces the following responses:</p>
+%%
+%% <pre>
+%%  4 - RpbGetClientIdResp
+%%  6 - RpbSetClientIdResp
+%% 10 - RpbGetResp
+%% 12 - RpbPutResp - 0 length
+%% 14 - RpbDelResp
+%% </pre>
+%%
+%% <p>The semantics are unchanged from their original
+%% implementations.</p>
+%% @end + +-module(riak_kv_pb_object). + +-include_lib("riak_pb/include/riak_kv_pb.hrl"). +-include_lib("riak_pb/include/riak_pb_kv_codec.hrl"). + +-behaviour(riak_api_pb_service). + +-export([init/0, + decode/2, + encode/1, + process/2, + process_stream/3]). + +-import(riak_pb_kv_codec, [decode_quorum/1]). + +-record(state, {client, % local client + req, % current request (for multi-message requests like list keys) + req_ctx, % context to go along with request (partial results, request ids etc) + client_id = <<0,0,0,0>> }). % emulate legacy API when vnode_vclocks is true + +-define(DEFAULT_TIMEOUT, 60000). + +%% @doc init/0 callback. Returns the service internal start +%% state. +-spec init() -> any(). +init() -> + {ok, C} = riak:local_client(), + #state{client=C}. + +%% @doc decode/2 callback. Decodes an incoming message. +decode(Code, Bin) -> + {ok, riak_pb_codec:decode(Code, Bin)}. + +%% @doc encode/1 callback. Encodes an outgoing response message. +encode(Message) -> + {ok, riak_pb_codec:encode(Message)}. + +%% @doc process/2 callback. Handles an incoming request message. +process(rpbgetclientidreq, #state{client=C, client_id=CID} = State) -> + ClientId = case app_helper:get_env(riak_kv, vnode_vclocks, false) of + true -> CID; + false -> C:get_client_id() + end, + Resp = #rpbgetclientidresp{client_id = ClientId}, + {reply, Resp, State}; + +process(#rpbsetclientidreq{client_id = ClientId}, State) -> + NewState = case app_helper:get_env(riak_kv, vnode_vclocks, false) of + true -> State#state{client_id=ClientId}; + false -> + {ok, C} = riak:local_client(ClientId), + State#state{client = C} + end, + {reply, rpbsetclientidresp, NewState}; + +process(#rpbgetreq{bucket=B, key=K, r=R0, pr=PR0, notfound_ok=NFOk, + basic_quorum=BQ, if_modified=VClock, + head=Head, deletedvclock=DeletedVClock}, #state{client=C} = State) -> + R = decode_quorum(R0), + PR = decode_quorum(PR0), + case C:get(B, K, make_option(deletedvclock, DeletedVClock) ++ + make_option(r, R) ++ + make_option(pr, PR) ++ + make_option(notfound_ok, NFOk) ++ + make_option(basic_quorum, BQ)) of + {ok, O} -> + case erlify_rpbvc(VClock) == riak_object:vclock(O) of + true -> + {reply, #rpbgetresp{unchanged = true}, State}; + _ -> + Contents = riak_object:get_contents(O), + PbContent = case Head of + true -> + %% Remove all the 'value' fields from the contents + %% This is a rough equivalent of a REST HEAD + %% request + BlankContents = [{MD, <<>>} || {MD, _} <- Contents], + riak_pb_kv_codec:encode_contents(BlankContents); + _ -> + riak_pb_kv_codec:encode_contents(Contents) + end, + {reply, #rpbgetresp{content = PbContent, + vclock = pbify_rpbvc(riak_object:vclock(O))}, State} + end; + {error, {deleted, TombstoneVClock}} -> + %% Found a tombstone - return its vector clock so it can + %% be properly overwritten + {reply, #rpbgetresp{vclock = pbify_rpbvc(TombstoneVClock)}, State}; + {error, notfound} -> + {reply, #rpbgetresp{}, State}; + {error, Reason} -> + {error, {format,Reason}, State} + end; + +process(#rpbputreq{bucket=B, key=K, vclock=PbVC, + if_not_modified=NotMod, if_none_match=NoneMatch} = Req, + #state{client=C} = State) when NotMod; NoneMatch -> + case C:get(B, K) of + {ok, _} when NoneMatch -> + {error, "match_found", State}; + {ok, O} when NotMod -> + case erlify_rpbvc(PbVC) == riak_object:vclock(O) of + true -> + process(Req#rpbputreq{if_not_modified=undefined, + if_none_match=undefined}, + State); + _ -> + {error, "modified", State} + end; + {error, _} when NoneMatch -> + process(Req#rpbputreq{if_not_modified=undefined, + 
if_none_match=undefined}, + State); + {error, notfound} when NotMod -> + {error, "notfound", State}; + {error, Reason} -> + {error, {format, Reason}, State} + end; + +process(#rpbputreq{bucket=B, key=K, vclock=PbVC, content=RpbContent, + w=W0, dw=DW0, pw=PW0, return_body=ReturnBody, + return_head=ReturnHead}, + #state{client=C} = State) -> + + case K of + undefined -> + %% Generate a key, the user didn't supply one + Key = list_to_binary(riak_core_util:unique_id_62()), + ReturnKey = Key; + _ -> + Key = K, + %% Don't return the key since we're not generating one + ReturnKey = undefined + end, + O0 = riak_object:new(B, Key, <<>>), + O1 = update_rpbcontent(O0, RpbContent), + O = update_pbvc(O1, PbVC), + %% erlang_protobuffs encodes as 1/0/undefined + W = decode_quorum(W0), + DW = decode_quorum(DW0), + PW = decode_quorum(PW0), + Options = case ReturnBody of + 1 -> [returnbody]; + true -> [returnbody]; + _ -> + case ReturnHead of + true -> [returnbody]; + _ -> [] + end + end, + case C:put(O, make_option(w, W) ++ make_option(dw, DW) ++ + make_option(pw, PW) ++ [{timeout, default_timeout()} | Options]) of + ok when is_binary(ReturnKey) -> + PutResp = #rpbputresp{key = ReturnKey}, + {reply, PutResp, State}; + ok -> + {reply, #rpbputresp{}, State}; + {ok, Obj} -> + Contents = riak_object:get_contents(Obj), + PbContents = case ReturnHead of + true -> + %% Remove all the 'value' fields from the contents + %% This is a rough equivalent of a REST HEAD + %% request + BlankContents = [{MD, <<>>} || {MD, _} <- Contents], + riak_pb_kv_codec:encode_contents(BlankContents); + _ -> + riak_pb_kv_codec:encode_contents(Contents) + end, + PutResp = #rpbputresp{content = PbContents, + vclock = pbify_rpbvc(riak_object:vclock(Obj)), + key = ReturnKey + }, + {reply, PutResp, State}; + {error, notfound} -> + {reply, #rpbputresp{}, State}; + {error, Reason} -> + {error, {format, Reason}, State} + end; + +process(#rpbdelreq{bucket=B, key=K, vclock=PbVc, + r=R0, w=W0, pr=PR0, pw=PW0, dw=DW0, rw=RW0}, + #state{client=C} = State) -> + W = decode_quorum(W0), + PW = decode_quorum(PW0), + DW = decode_quorum(DW0), + R = decode_quorum(R0), + PR = decode_quorum(PR0), + RW = decode_quorum(RW0), + + Options = make_option(r, R) ++ + make_option(w, W) ++ + make_option(rw, RW) ++ + make_option(pr, PR) ++ + make_option(pw, PW) ++ + make_option(dw, DW), + Result = case PbVc of + undefined -> + C:delete(B, K, Options); + _ -> + VClock = erlify_rpbvc(PbVc), + C:delete_vclock(B, K, VClock, Options) + end, + case Result of + ok -> + {reply, rpbdelresp, State}; + {error, notfound} -> %% delete succeeds if already deleted + {reply, rpbdelresp, State}; + {error, Reason} -> + {error, {format, Reason}, State} + end. + +%% @doc process_stream/3 callback. This service does not create any +%% streaming responses and so ignores all incoming messages. +process_stream(_,_,State) -> + {ignore, State}. + +%% =================================================================== +%% Internal functions +%% =================================================================== + +%% Update riak_object with the pbcontent provided +update_rpbcontent(O0, RpbContent) -> + {MetaData, Value} = riak_pb_kv_codec:decode_content(RpbContent), + O1 = riak_object:update_metadata(O0, MetaData), + riak_object:update_value(O1, Value). + +%% Update riak_object with vector clock +update_pbvc(O0, PbVc) -> + Vclock = erlify_rpbvc(PbVc), + riak_object:set_vclock(O0, Vclock). 
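
update_pbvc/2 leans on the two helpers defined just below: on the wire, a vector clock is nothing more than a zlib-compressed external term. A round-trip sketch (the second match doubles as an equality assertion, the same comparison the if_modified branch above performs):

    roundtrip(VClock) ->
        PbVc = zlib:zip(term_to_binary(VClock)),    %% pbify_rpbvc/1
        VClock = binary_to_term(zlib:unzip(PbVc)),  %% erlify_rpbvc/1; match asserts equality
        PbVc.
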
+ +%% return a key/value tuple that we can ++ to other options so long as the +%% value is not default or undefined -- those values are pulled from the +%% bucket by the get/put FSMs. +make_option(_, undefined) -> + []; +make_option(_, default) -> + []; +make_option(K, V) -> + [{K, V}]. + +%% Convert a vector clock to erlang +erlify_rpbvc(undefined) -> + vclock:fresh(); +erlify_rpbvc(<<>>) -> + vclock:fresh(); +erlify_rpbvc(PbVc) -> + binary_to_term(zlib:unzip(PbVc)). + +%% Convert a vector clock to protocol buffers +pbify_rpbvc(Vc) -> + zlib:zip(term_to_binary(Vc)). + +default_timeout() -> + ?DEFAULT_TIMEOUT. diff --git a/src/riak_kv_pb_socket.erl b/src/riak_kv_pb_socket.erl deleted file mode 100644 index 465e44e577..0000000000 --- a/src/riak_kv_pb_socket.erl +++ /dev/null @@ -1,646 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% riak_kv_pb_socket: service protocol buffer clients -%% -%% Copyright (c) 2007-2010 Basho Technologies, Inc. All Rights Reserved. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- - -%% @doc service protocol buffer clients - --module(riak_kv_pb_socket). --ifdef(TEST). --include_lib("eunit/include/eunit.hrl"). --endif. --include_lib("riakc/include/riakclient_pb.hrl"). --include_lib("riakc/include/riakc_pb.hrl"). --include_lib("riak_pipe/include/riak_pipe.hrl"). --behaviour(gen_server). - --export([start_link/0, set_socket/2]). --export([init/1, handle_call/3, handle_cast/2, handle_info/2, - terminate/2, code_change/3]). - --type msg() :: atom() | tuple(). - --record(state, {sock, % protocol buffers socket - client, % local client - req, % current request (for multi-message requests like list keys) - req_ctx, % context to go along with request (partial results, request ids etc) - client_id = <<0,0,0,0>> }). % emulate legacy API when vnode_vclocks is true - --record(pipe_ctx, {pipe, % pipe handling mapred request - ref, % easier-access ref/reqid - timer, % ref() for timeout send_after - sender, % {pid(), monitor()} of process sending inputs - has_mr_query}). % true if the request contains a query. - --define(PROTO_MAJOR, 1). --define(PROTO_MINOR, 0). --define(DEFAULT_TIMEOUT, 60000). - -%% =================================================================== -%% Public API -%% =================================================================== - -start_link() -> - gen_server2:start_link(?MODULE, [], []). - -set_socket(Pid, Socket) -> - gen_server2:call(Pid, {set_socket, Socket}). - -init([]) -> - riak_kv_stat:update(pbc_connect), - {ok, C} = riak:local_client(), - {ok, #state{client = C}}. - -handle_call({set_socket, Socket}, _From, State) -> - inet:setopts(Socket, [{active, once}, {packet, 4}, {header, 1}]), - {reply, ok, State#state{sock = Socket}}. - -handle_cast(_Msg, State) -> - {noreply, State}. 
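
For orientation: riak_kv_pb_socket, whose deletion begins here, multiplexed every request type through one gen_server, while its replacement is riak_api's service dispatch, with the four new modules above claiming the message-code ranges listed in their module docs. The registration itself is not part of this patch; it would plausibly look like the following sketch (the riak_api_pb_service:register/1 call and the exact ranges are assumptions, not shown here):

    ok = riak_api_pb_service:register([{riak_kv_pb_object,  3, 14},
                                       {riak_kv_pb_bucket, 15, 22},
                                       {riak_kv_pb_mapred, 23, 24},
                                       {riak_kv_pb_index,  25, 26}]).
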
- -handle_info({tcp_closed, Socket}, State=#state{sock=Socket}) -> - {stop, normal, State}; -handle_info({tcp_error, Socket, _Reason}, State=#state{sock=Socket}) -> - {stop, normal, State}; -handle_info({tcp, _Sock, Data}, State=#state{sock=Socket, req=undefined}) -> - [MsgCode|MsgData] = Data, - Msg = riakc_pb:decode(MsgCode, MsgData), - case process_message(Msg, State) of - {pause, NewState} -> - ok; - NewState -> - inet:setopts(Socket, [{active, once}]) - end, - {noreply, NewState}; -handle_info({tcp, _Sock, _Data}, State) -> - %% req =/= undefined: received a new request while another was in - %% progress -> Error - lager:error("Received a new PB socket request" - " while another was in progress"), - {stop, normal, State}; - -%% Handle responses from stream_list_keys -handle_info({ReqId, done}, - State=#state{sock = Socket, req=#rpblistkeysreq{}, req_ctx=ReqId}) -> - NewState = send_msg(#rpblistkeysresp{done = 1}, State), - inet:setopts(Socket, [{active, once}]), - {noreply, NewState#state{req = undefined, req_ctx = undefined}}; -handle_info({ReqId, From, {keys, []}}, State=#state{req=#rpblistkeysreq{}, req_ctx=ReqId}) -> - riak_kv_keys_fsm:ack_keys(From), - {noreply, State}; % No keys - no need to send a message, will send done soon. -handle_info({ReqId, {keys, []}}, State=#state{req=#rpblistkeysreq{}, req_ctx=ReqId}) -> - {noreply, State}; % No keys - no need to send a message, will send done soon. -handle_info({ReqId, From, {keys, Keys}}, State=#state{req=#rpblistkeysreq{}, req_ctx=ReqId}) -> - riak_kv_keys_fsm:ack_keys(From), - {noreply, send_msg(#rpblistkeysresp{keys = Keys}, State)}; -handle_info({ReqId, {keys, Keys}}, State=#state{req=#rpblistkeysreq{}, req_ctx=ReqId}) -> - {noreply, send_msg(#rpblistkeysresp{keys = Keys}, State)}; -handle_info({ReqId, Error}, - State=#state{sock = Socket, req=#rpblistkeysreq{}, req_ctx=ReqId}) -> - NewState = send_error("~p", [Error], State), - inet:setopts(Socket, [{active, once}]), - {noreply, NewState#state{req = undefined, req_ctx = undefined}}; - -%% PIPE Handle response from mapred_stream -handle_info(#pipe_eoi{ref=ReqId}, - State=#state{req=#rpbmapredreq{}, - req_ctx=#pipe_ctx{ref=ReqId, - timer=Timer}}) -> - NewState = send_msg(#rpbmapredresp{done = 1}, State), - erlang:cancel_timer(Timer), - {noreply, NewState#state{req = undefined, req_ctx = undefined}}; - -handle_info(#pipe_result{ref=ReqId, from=PhaseId, result=Res}, - State=#state{req=#rpbmapredreq{content_type = ContentType}, - req_ctx=#pipe_ctx{ref=ReqId, has_mr_query=HasMRQuery}=PipeCtx}) -> - case encode_mapred_phase([Res], ContentType, HasMRQuery) of - {error, Reason} -> - erlang:cancel_timer(PipeCtx#pipe_ctx.timer), - %% destroying the pipe will automatically kill the sender - riak_pipe:destroy(PipeCtx#pipe_ctx.pipe), - NewState = send_error("~p", [Reason], State), - {noreply, NewState#state{req = undefined, req_ctx = undefined}}; - Response -> - {noreply, send_msg(#rpbmapredresp{phase=PhaseId, - response=Response}, State)} - end; -handle_info(#pipe_log{ref=ReqId, from=From, msg=Msg}, - State=#state{req=#rpbmapredreq{}, - req_ctx=#pipe_ctx{ref=ReqId}=PipeCtx}) -> - case Msg of - {trace, [error], {error, Info}} -> - erlang:cancel_timer(PipeCtx#pipe_ctx.timer), - %% destroying the pipe will automatically kill the sender - riak_pipe:destroy(PipeCtx#pipe_ctx.pipe), - JsonInfo = {struct, riak_kv_mapred_json:jsonify_pipe_error( - From, Info)}, - NewState = send_error(mochijson2:encode(JsonInfo), [], State), - {noreply, NewState#state{req = undefined, req_ctx = undefined}}; - _ -> - 
{noreply, State} - end; -handle_info({'DOWN', Ref, process, Pid, Reason}, - State=#state{req=#rpbmapredreq{}, - req_ctx=#pipe_ctx{sender={Pid, Ref}}=PipeCtx}) -> - %% the async input sender exited - if Reason == normal -> - %% just reached the end of the input sending - all is - %% well, continue processing - NewPipeCtx = PipeCtx#pipe_ctx{sender=undefined}, - {noreply, State#state{req_ctx=NewPipeCtx}}; - true -> - %% something went wrong sending inputs - tell the client - %% about it, and shutdown the pipe - erlang:cancel_timer(PipeCtx#pipe_ctx.timer), - riak_pipe:destroy(PipeCtx#pipe_ctx.pipe), - lager:error("Error sending inputs: ~p", [Reason]), - NewState = send_error("Error sending inputs: ~p", [Reason], State), - {noreply, NewState#state{req=undefined, req_ctx=undefined}} - end; -handle_info({pipe_timeout, Ref}, - State=#state{req=#rpbmapredreq{}, - req_ctx=#pipe_ctx{ref=Ref, - pipe=Pipe}}) -> - NewState = send_error("timeout", [], State), - %% destroying the pipe will automatically kill the sender - riak_pipe:destroy(Pipe), - {noreply, NewState#state{req=undefined, req_ctx=undefined}}; -%% ignore #pipe_log for now, since riak_kv_mrc_pipe does not enable it - -%% LEGACY Handle response from mapred_stream/mapred_bucket_stream -handle_info({flow_results, ReqId, done}, - State=#state{sock = Socket, req=#rpbmapredreq{}, req_ctx=ReqId}) -> - NewState = send_msg(#rpbmapredresp{done = 1}, State), - inet:setopts(Socket, [{active, once}]), - {noreply, NewState#state{req = undefined, req_ctx = undefined}}; - -handle_info({flow_results, ReqId, {error, Reason}}, - State=#state{sock = Socket, req=#rpbmapredreq{}, req_ctx=ReqId}) -> - NewState = send_error("~p", [Reason], State), - inet:setopts(Socket, [{active, once}]), - {noreply, NewState#state{req = undefined, req_ctx = undefined}}; - -handle_info({flow_results, PhaseId, ReqId, Res}, - State=#state{sock=Socket, - req=#rpbmapredreq{content_type = ContentType}, - req_ctx=ReqId}) -> - case encode_mapred_phase(Res, ContentType, true) of - {error, Reason} -> - NewState = send_error("~p", [Reason], State), - inet:setopts(Socket, [{active, once}]), - {noreply, NewState#state{req = undefined, req_ctx = undefined}}; - Response -> - {noreply, send_msg(#rpbmapredresp{phase=PhaseId, - response=Response}, State)} - end; - -handle_info({flow_error, ReqId, Error}, - State=#state{sock = Socket, req=#rpbmapredreq{}, req_ctx=ReqId}) -> - NewState = send_error("~p", [Error], State), - inet:setopts(Socket, [{active, once}]), - {noreply, NewState#state{req = undefined, req_ctx = undefined}}; - -handle_info(_, State) -> % Ignore any late replies from gen_servers/messages from fsms - {noreply, State}. - -terminate(_Reason, _State) -> - riak_kv_stat:update(pbc_disconnect), - ok. - -code_change(_OldVsn, State, _Extra) -> {ok, State}. - -%% =================================================================== -%% Message Handling -%% =================================================================== - -%% Process an incoming protocol buffers message. Return either -%% a new #state{} if new incoming messages should be received -%% or {pause, #state{}} if the incoming TCP socket should not be -%% set active again. -%% -%% If 'pause' is returned, it needs to be re-enabled by whatever -%% callbacks are waiting for it. -%% --spec process_message(msg(), #state{}) -> #state{} | {pause, #state{}}. 
-process_message(rpbpingreq, State) -> - send_msg(rpbpingresp, State); - -process_message(rpbgetclientidreq, #state{client=C, client_id=CID} = State) -> - ClientId = case app_helper:get_env(riak_kv, vnode_vclocks, false) of - true -> CID; - false -> C:get_client_id() - end, - Resp = #rpbgetclientidresp{client_id = ClientId}, - send_msg(Resp, State); - -process_message(#rpbsetclientidreq{client_id = ClientId}, State) -> - NewState = case app_helper:get_env(riak_kv, vnode_vclocks, false) of - true -> State#state{client_id=ClientId}; - false -> - {ok, C} = riak:local_client(ClientId), - State#state{client = C} - end, - send_msg(rpbsetclientidresp, NewState); - -process_message(rpbgetserverinforeq, State) -> - Resp = #rpbgetserverinforesp{node = riakc_pb:to_binary(node()), - server_version = get_riak_version()}, - send_msg(Resp, State); - -process_message(#rpbgetreq{bucket=B, key=K, r=R0, pr=PR0, notfound_ok=NFOk, - basic_quorum=BQ, if_modified=VClock, - head=Head, deletedvclock=DeletedVClock}, #state{client=C} = State) -> - R = normalize_rw_value(R0), - PR = normalize_rw_value(PR0), - case C:get(B, K, make_option(deletedvclock, DeletedVClock) ++ - make_option(r, R) ++ - make_option(pr, PR) ++ - make_option(notfound_ok, NFOk) ++ - make_option(basic_quorum, BQ)) of - {ok, O} -> - case erlify_rpbvc(VClock) == riak_object:vclock(O) of - true -> - send_msg(#rpbgetresp{unchanged = true}, State); - _ -> - Contents = riak_object:get_contents(O), - PbContent = case Head of - true -> - %% Remove all the 'value' fields from the contents - %% This is a rough equivalent of a REST HEAD - %% request - BlankContents = [{MD, <<>>} || {MD, _} <- Contents], - riakc_pb:pbify_rpbcontents(BlankContents, []); - _ -> - riakc_pb:pbify_rpbcontents(Contents, []) - end, - GetResp = #rpbgetresp{content = PbContent, - vclock = pbify_rpbvc(riak_object:vclock(O))}, - send_msg(GetResp, State) - end; - {error, {deleted, TombstoneVClock}} -> - %% Found a tombstone - return its vector clock so it can - %% be properly overwritten - send_msg(#rpbgetresp{vclock = pbify_rpbvc(TombstoneVClock)}, State); - {error, notfound} -> - send_msg(#rpbgetresp{}, State); - {error, Reason} -> - send_error("~p", [Reason], State) - end; - -process_message(#rpbputreq{bucket=B, key=K, vclock=PbVC, - if_not_modified=NotMod, if_none_match=NoneMatch} = Req, - #state{client=C} = State) when NotMod; NoneMatch -> - case C:get(B, K) of - {ok, _} when NoneMatch -> - send_error("match_found", [], State); - {ok, O} when NotMod -> - case erlify_rpbvc(PbVC) == riak_object:vclock(O) of - true -> - process_message(Req#rpbputreq{if_not_modified=undefined, - if_none_match=undefined}, - State); - _ -> - send_error("modified", [], State) - end; - {error, _} when NoneMatch -> - process_message(Req#rpbputreq{if_not_modified=undefined, - if_none_match=undefined}, - State); - {error, notfound} when NotMod -> - send_error("notfound", [], State); - {error, Reason} -> - send_error("~p", [Reason], State) - end; -process_message(#rpbputreq{bucket=B, key=K, vclock=PbVC, content=RpbContent, - w=W0, dw=DW0, pw=PW0, return_body=ReturnBody, - return_head=ReturnHead}, - #state{client=C} = State) -> - - case K of - undefined -> - % Generate a key, the user didn't supply one - Key = list_to_binary(riak_core_util:unique_id_62()), - ReturnKey = Key; - _ -> - Key = K, - % Don't return the key since we're not generating one - ReturnKey = undefined - end, - O0 = riak_object:new(B, Key, <<>>), - O1 = update_rpbcontent(O0, RpbContent), - O = update_pbvc(O1, PbVC), - % erlang_protobuffs 
encodes as 1/0/undefined - W = normalize_rw_value(W0), - DW = normalize_rw_value(DW0), - PW = normalize_rw_value(PW0), - Options = case ReturnBody of - 1 -> [returnbody]; - true -> [returnbody]; - _ -> - case ReturnHead of - true -> [returnbody]; - _ -> [] - end - end, - case C:put(O, make_option(w, W) ++ make_option(dw, DW) ++ - make_option(pw, PW) ++ [{timeout, default_timeout()} | Options]) of - ok when is_binary(ReturnKey) -> - PutResp = #rpbputresp{key = ReturnKey}, - send_msg(PutResp, State); - ok -> - send_msg(#rpbputresp{}, State); - {ok, Obj} -> - Contents = riak_object:get_contents(Obj), - PbContents = case ReturnHead of - true -> - %% Remove all the 'value' fields from the contents - %% This is a rough equivalent of a REST HEAD - %% request - BlankContents = [{MD, <<>>} || {MD, _} <- Contents], - riakc_pb:pbify_rpbcontents(BlankContents, []); - _ -> - riakc_pb:pbify_rpbcontents(Contents, []) - end, - PutResp = #rpbputresp{content = PbContents, - vclock = pbify_rpbvc(riak_object:vclock(Obj)), - key = ReturnKey - }, - send_msg(PutResp, State); - {error, notfound} -> - send_msg(#rpbputresp{}, State); - {error, Reason} -> - send_error("~p", [Reason], State) - end; - -process_message(#rpbdelreq{bucket=B, key=K, vclock=PbVc, - r=R0, w=W0, pr=PR0, pw=PW0, dw=DW0, rw=RW0}, - #state{client=C} = State) -> - W = normalize_rw_value(W0), - PW = normalize_rw_value(PW0), - DW = normalize_rw_value(DW0), - R = normalize_rw_value(R0), - PR = normalize_rw_value(PR0), - RW = normalize_rw_value(RW0), - - Options = make_option(r, R) ++ - make_option(w, W) ++ - make_option(rw, RW) ++ - make_option(pr, PR) ++ - make_option(pw, PW) ++ - make_option(dw, DW), - Result = case PbVc of - undefined -> - C:delete(B, K, Options); - _ -> - VClock = erlify_rpbvc(PbVc), - C:delete_vclock(B, K, VClock, Options) - end, - case Result of - ok -> - send_msg(rpbdelresp, State); - {error, notfound} -> %% delete succeeds if already deleted - send_msg(rpbdelresp, State); - {error, Reason} -> - send_error("~p", [Reason], State) - end; - -process_message(rpblistbucketsreq, - #state{client=C} = State) -> - case C:list_buckets() of - {ok, Buckets} -> - send_msg(#rpblistbucketsresp{buckets = Buckets}, State); - {error, Reason} -> - send_error("~p", [Reason], State) - end; - -%% Start streaming in list keys -process_message(#rpblistkeysreq{bucket=B}=Req, - #state{client=C} = State) -> - %% Pause incoming packets - stream_list_keys results - %% will be processed by handle_info, it will - %% set socket active again on completion of streaming. 
- {ok, ReqId} = C:stream_list_keys(B), - {pause, State#state{req = Req, req_ctx = ReqId}}; - -%% Get bucket properties -process_message(#rpbgetbucketreq{bucket=B}, - #state{client=C} = State) -> - Props = C:get_bucket(B), - PbProps = riakc_pb:pbify_rpbbucketprops(Props), - send_msg(#rpbgetbucketresp{props = PbProps}, State); - -%% Set bucket properties -process_message(#rpbsetbucketreq{bucket=B, props = PbProps}, - #state{client=C} = State) -> - Props = riakc_pb:erlify_rpbbucketprops(PbProps), - case C:set_bucket(B, Props) of - ok -> - send_msg(rpbsetbucketresp, State); - {error, Details} -> - send_error("Invalid bucket properties: ~p", [Details], State) - end; - -%% TODO: refactor, cleanup -%% Start map/reduce job - results will be processed in handle_info -process_message(#rpbmapredreq{request=MrReq, content_type=ContentType}=Req, - State) -> - case decode_mapred_query(MrReq, ContentType) of - {error, Reason} -> - send_error("~p", [Reason], State); - - {ok, Inputs, Query, Timeout} -> - case riak_kv_util:mapred_system() of - pipe -> - pipe_mapreduce(Req, State, Inputs, Query, Timeout); - legacy -> - legacy_mapreduce(Req, State, Inputs, Query, Timeout) - end - end. - -pipe_mapreduce(Req, State, Inputs, Query, Timeout) -> - try riak_kv_mrc_pipe:mapred_stream(Query) of - {{ok, Pipe}, _NumKeeps} -> - PipeRef = (Pipe#pipe.sink)#fitting.ref, - Timer = erlang:send_after(Timeout, self(), - {pipe_timeout, PipeRef}), - {InputSender, SenderMonitor} = - riak_kv_mrc_pipe:send_inputs_async(Pipe, Inputs), - Ctx = #pipe_ctx{pipe=Pipe, - ref=PipeRef, - timer=Timer, - sender={InputSender, SenderMonitor}, - has_mr_query = (Query /= [])}, - State#state{req=Req, req_ctx=Ctx} - catch throw:{badarg, Fitting, Reason} -> - send_error("Phase ~p: ~s", [Fitting, Reason], State), - State - end. - -legacy_mapreduce(#rpbmapredreq{content_type=ContentType}=Req, - #state{client=C}=State, Inputs, Query, Timeout) -> - ResultTransformer = get_result_transformer(ContentType), - case is_binary(Inputs) orelse is_key_filter(Inputs) of - true -> - case C:mapred_bucket_stream(Inputs, Query, - self(), ResultTransformer, Timeout) of - {stop, Error} -> - send_error("~p", [Error], State); - - {ok, ReqId} -> - {pause, State#state{req = Req, req_ctx = ReqId}} - end; - false -> - case is_list(Inputs) of - true -> - case C:mapred_stream(Query, self(), ResultTransformer, Timeout) of - {stop, Error} -> - send_error("~p", [Error], State); - - {ok, {ReqId, FSM}} -> - luke_flow:add_inputs(FSM, Inputs), - luke_flow:finish_inputs(FSM), - %% Pause incoming packets - map/reduce results - %% will be processed by handle_info, it will - %% set socket active again on completion of streaming. - {pause, State#state{req = Req, req_ctx = ReqId}} - end; - false -> - case is_tuple(Inputs) andalso size(Inputs)==4 andalso - element(1, Inputs) == modfun andalso - is_atom(element(2, Inputs)) andalso - is_atom(element(3, Inputs)) of - true -> - case C:mapred_stream(Query, self(), ResultTransformer, Timeout) of - {stop, Error} -> - send_error("~p", [Error], State); - - {ok, {ReqId, FSM}} -> - C:mapred_dynamic_inputs_stream( - FSM, Inputs, Timeout), - luke_flow:finish_inputs(FSM), - %% Pause incoming packets - map/reduce results - %% will be processed by handle_info, it will - %% set socket active again on completion of streaming. - {pause, State#state{req = Req, req_ctx = ReqId}} - end; - false -> - send_error("~p", [bad_mapred_inputs], State) - end - end - end. - -%% Send a message to the client --spec send_msg(msg(), #state{}) -> #state{}. 
-send_msg(Msg, State) -> - Pkt = riakc_pb:encode(Msg), - gen_tcp:send(State#state.sock, Pkt), - State. - -%% Send an error to the client --spec send_error(string(), list(), #state{}) -> #state{}. -send_error(Msg, Fmt, State) -> - send_error(Msg, Fmt, ?RIAKC_ERR_GENERAL, State). - --spec send_error(string(), list(), non_neg_integer(), #state{}) -> #state{}. -send_error(Msg, Fmt, ErrCode, State) -> - %% protocol buffers accepts nested lists for binaries so no need to flatten the list - ErrMsg = io_lib:format(Msg, Fmt), - send_msg(#rpberrorresp{errmsg=ErrMsg, errcode=ErrCode}, State). - -%% Update riak_object with the pbcontent provided -update_rpbcontent(O0, RpbContent) -> - {MetaData, Value} = riakc_pb:erlify_rpbcontent(RpbContent), - O1 = riak_object:update_metadata(O0, MetaData), - riak_object:update_value(O1, Value). - -%% Update riak_object with vector clock -update_pbvc(O0, PbVc) -> - Vclock = erlify_rpbvc(PbVc), - riak_object:set_vclock(O0, Vclock). - -%% return a key/value tuple that we can ++ to other options so long as the -%% value is not default or undefined -- those values are pulled from the -%% bucket by the get/put FSMs. -make_option(_, undefined) -> - []; -make_option(_, default) -> - []; -make_option(K, V) -> - [{K, V}]. - -default_timeout() -> - 60000. - -%% Convert a vector clock to erlang -erlify_rpbvc(undefined) -> - vclock:fresh(); -erlify_rpbvc(<<>>) -> - vclock:fresh(); -erlify_rpbvc(PbVc) -> - binary_to_term(zlib:unzip(PbVc)). - -%% Convert a vector clock to protocol buffers -pbify_rpbvc(Vc) -> - zlib:zip(term_to_binary(Vc)). - -%% Return the current version of riak_kv --spec get_riak_version() -> binary(). -get_riak_version() -> - {ok, Vsn} = application:get_key(riak_kv, vsn), - riakc_pb:to_binary(Vsn). - -%% Decode a mapred query -%% {ok, ParsedInputs, ParsedQuery, Timeout}; -decode_mapred_query(Query, <<"application/json">>) -> - riak_kv_mapred_json:parse_request(Query); -decode_mapred_query(Query, <<"application/x-erlang-binary">>) -> - riak_kv_mapred_term:parse_request(Query); -decode_mapred_query(_Query, ContentType) -> - {error, {unknown_content_type, ContentType}}. - -%% Detect key filtering -is_key_filter({Bucket, Filters}) when is_binary(Bucket), - is_list(Filters) -> - true; -is_key_filter(_) -> - false. - -%% Convert a map/reduce phase to the encoding requested -encode_mapred_phase(Res, <<"application/json">>, HasMRQuery) -> - Res1 = riak_kv_mapred_json:jsonify_bkeys(Res, HasMRQuery), - mochijson2:encode(Res1); -encode_mapred_phase(Res, <<"application/x-erlang-binary">>, _) -> - term_to_binary(Res); -encode_mapred_phase(_Res, ContentType, _) -> - {error, {unknown_content_type, ContentType}}. - -normalize_rw_value(?RIAKC_RW_ONE) -> one; -normalize_rw_value(?RIAKC_RW_QUORUM) -> quorum; -normalize_rw_value(?RIAKC_RW_ALL) -> all; -normalize_rw_value(?RIAKC_RW_DEFAULT) -> default; -normalize_rw_value(V) -> V. - -%% get a result transformer for the content-type -%% jsonify not_founds for application/json -%% do nothing otherwise -get_result_transformer(<<"application/json">>) -> - fun riak_kv_mapred_json:jsonify_not_found/1; -get_result_transformer(_) -> - undefined. diff --git a/src/riak_kv_pb_socket_sup.erl b/src/riak_kv_pb_socket_sup.erl deleted file mode 100644 index 4cfc5fa525..0000000000 --- a/src/riak_kv_pb_socket_sup.erl +++ /dev/null @@ -1,44 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% riak_kv_pb_socket_sup: supervise riak_kv_pb_socket processes -%% -%% Copyright (c) 2007-2010 Basho Technologies, Inc. 
All Rights Reserved. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- - -%% @doc supervise riak_kv_pb_socket processes - --module(riak_kv_pb_socket_sup). --behaviour(supervisor). --export([start_link/0, init/1, stop/1]). --export([start_socket/0]). - -start_socket() -> - supervisor:start_child(?MODULE, []). - -start_link() -> - supervisor:start_link({local, ?MODULE}, ?MODULE, []). - -stop(_S) -> ok. - -%% @private -init([]) -> - {ok, - {{simple_one_for_one, 10, 10}, - [{undefined, - {riak_kv_pb_socket, start_link, []}, - temporary, brutal_kill, worker, [riak_kv_pb_socket]}]}}. diff --git a/src/riak_kv_phase_proto.erl b/src/riak_kv_phase_proto.erl deleted file mode 100644 index d4f6545397..0000000000 --- a/src/riak_kv_phase_proto.erl +++ /dev/null @@ -1,35 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% riak_phase_proto: prototype for riak dataflow phases -%% -%% Copyright (c) 2007-2010 Basho Technologies, Inc. All Rights Reserved. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- - -%% @doc prototype for riak dataflow phases - --module(riak_kv_phase_proto). - --export([mapexec_result/5, - mapexec_error/3]). - -mapexec_result(PhasePid, VNode, BKey, Result, Id) -> - gen_fsm:send_event(PhasePid, {mapexec_reply, VNode, BKey, Result, Id}). - -mapexec_error(PhasePid, Error, Id) -> - gen_fsm:send_event(PhasePid, {mapexec_error, Id, Error}). - diff --git a/src/riak_kv_pipe_get.erl b/src/riak_kv_pipe_get.erl index 9b0e0f96de..cb06f81696 100644 --- a/src/riak_kv_pipe_get.erl +++ b/src/riak_kv_pipe_get.erl @@ -34,13 +34,16 @@ %% partition number as the Pipe vnode owning this worker. For this %% reason, it is important to use a `chashfun' for this fitting that %% gives the same answer as the consistent hashing function for the KV -%% object. +%% object. If the object is not found at the local KV vnode, each KV +%% vnode in the remainder of the object's primary preflist is tried in +%% sequence. %% %% If the object is found, the tuple `{ok, Object, Keydata}' is sent %% as output. If an error occurs looking up the object, and the %% preflist has been exhausted, the tuple `{Error, {Bucket, Key}, -%% KeyData}' is sent as output (where `Error' is usually `not_found'). -%% The atom `undefined' is used as `KeyData' if none is specified. 
+%% KeyData}' is sent as output (where `Error' is usually `{error, +%% notfound}'). The atom `undefined' is used as `KeyData' if none is +%% specified. -module(riak_kv_pipe_get). -behaviour(riak_pipe_vnode_worker). @@ -79,36 +82,65 @@ init(Partition, FittingDetails) -> %% @doc Lookup the bucket/key pair on the Riak KV vnode, and send it %% downstream. -spec process(riak_kv_mrc_pipe:key_input(), boolean(), state()) - -> {ok | forward_preflist | {error, term()}, state()}. + -> {ok | {error, term()}, state()}. process(Input, Last, #state{partition=Partition, fd=FittingDetails}=State) -> + %% assume local chashfun was used for initial attempt + case try_partition(Input, {Partition, node()}, FittingDetails) of + {error, _} when Last == false -> + {try_preflist(Input, State), State}; + Result -> + {send_output(Input, Result, State), State} + end. + +send_output(Input, {ok, Obj}, State) -> + send_output({ok, Obj, keydata(Input)}, State); +send_output(Input, Error, State) -> + send_output({Error, bkey(Input), keydata(Input)}, State). + +send_output(Output, #state{partition=Partition, fd=FittingDetails}) -> + riak_pipe_vnode_worker:send_output( + Output, Partition, FittingDetails). + +%% @doc Try the other primaries in the Input's preflist (skipping the +%% local vnode we already tried in {@link process/3}. +try_preflist(Input, #state{partition=P}=State) -> + %% pipe only uses primaries - mimicking that here, both to provide + %% continuity, and also to avoid a really long wait for a true + %% not-found + AnnPreflist = riak_core_apl:get_primary_apl( + bkey_chash(Input), bkey_nval(Input), riak_kv), + Preflist = [ V || {V, _A} <- AnnPreflist ], + %% remove the one we already tried + RestPreflist = Preflist--[{P, node()}], + try_preflist(Input, RestPreflist, State). + +%% helper function walking the remaining preflist +try_preflist(Input, [], State) -> + %% send not-found if no replicas gave us the value + send_output(Input, {error, notfound}, State); +try_preflist(Input, [NextV|Rest], #state{fd=FittingDetails}=State) -> + case try_partition(Input, NextV, FittingDetails) of + {ok,_}=Result -> + send_output(Input, Result, State); + _Error -> + try_preflist(Input, Rest, State) + end. + +try_partition(Input, Vnode, FittingDetails) -> ReqId = make_req_id(), + Start = os:timestamp(), riak_core_vnode_master:command( - {Partition, node()}, %% assume local chashfun was used + Vnode, ?KV_GET_REQ{bkey=bkey(Input), req_id=ReqId}, {raw, ReqId, self()}, riak_kv_vnode_master), receive {ReqId, {r, {ok, Obj}, _, _}} -> - case riak_pipe_vnode_worker:send_output( - {ok, Obj, keydata(Input)}, Partition, FittingDetails) of - ok -> - {ok, State}; - ER -> - {ER, State} - end; + ?T(FittingDetails, [kv_get], [{kv_get_latency, {r, timer:now_diff(os:timestamp(), Start)}}]), + {ok, Obj}; {ReqId, {r, {error, _} = Error, _, _}} -> - if Last -> - case riak_pipe_vnode_worker:send_output( - {Error, bkey(Input), keydata(Input)}, - Partition, FittingDetails) of - ok -> - {ok, State}; - ER -> - {ER, State} - end; - true -> - {forward_preflist, State} - end + ?T(FittingDetails, [kv_get], [{kv_get_latency, {Error, timer:now_diff(os:timestamp(), Start)}}]), + Error end. %% @doc Not used. @@ -117,7 +149,7 @@ done(_State) -> ok. make_req_id() -> - erlang:phash2(erlang:now()). % stolen from riak_client + erlang:phash2({self(), os:timestamp()}). 
% stolen from riak_client %% useful utilities diff --git a/src/riak_kv_pipe_index.erl b/src/riak_kv_pipe_index.erl index b144d88263..8ed751f4b4 100644 --- a/src/riak_kv_pipe_index.erl +++ b/src/riak_kv_pipe_index.erl @@ -68,11 +68,9 @@ process(Input, _Last, #state{p=Partition, fd=FittingDetails}=State) -> {Bucket, Query} -> FilterVNodes = [] end, - ReqId = erlang:phash2(erlang:now()), % stolen from riak_client + ReqId = erlang:phash2({self(), os:timestamp()}), % stolen from riak_client riak_core_vnode_master:coverage( - ?KV_INDEX_REQ{bucket=Bucket, - item_filter=none, %% riak_client uses nothing else? - qry=Query}, + riak_kv_index_fsm:req(Bucket, none, Query), {Partition, node()}, FilterVNodes, {raw, ReqId, self()}, @@ -83,6 +81,14 @@ keysend_loop(ReqId, Partition, FittingDetails) -> receive {ReqId, {error, _Reason} = ER} -> ER; + {ReqId, {From, Bucket, Keys}} -> + case keysend(Bucket, Keys, Partition, FittingDetails) of + ok -> + riak_kv_vnode:ack_keys(From), + keysend_loop(ReqId, Partition, FittingDetails); + ER -> + ER + end; {ReqId, {Bucket, Keys}} -> case keysend(Bucket, Keys, Partition, FittingDetails) of ok -> @@ -121,6 +127,9 @@ done(_State) -> %% to trigger querying on the appropriate vnodes. The `eoi' %% message is sent to the pipe as soon as it is confirmed that %% all querying processes have started. +%% +%% Note that log/trace messages are sent to the sink of the +%% original pipe. It is expected that that sink is an `fsm' type. -spec queue_existing_pipe(riak_pipe:pipe(), bucket_or_filter(), {eq, Index::binary(), Value::term()} @@ -131,15 +140,17 @@ done(_State) -> queue_existing_pipe(Pipe, Bucket, Query, Timeout) -> %% make our tiny pipe [{_Name, Head}|_] = Pipe#pipe.fittings, + Period = riak_kv_mrc_pipe:sink_sync_period(), {ok, LKP} = riak_pipe:exec([#fitting_spec{name=index, module=?MODULE, nval=1}], [{sink, Head}, {trace, [error]}, - {log, {sink, Pipe#pipe.sink}}]), + {log, {sink, Pipe#pipe.sink}}, + {sink_type, {fsm, Period, infinity}}]), %% setup the cover operation - ReqId = erlang:phash2(erlang:now()), %% stolen from riak_client + ReqId = erlang:phash2({self(), os:timestamp()}), %% stolen from riak_client BucketProps = riak_core_bucket:get_bucket(Bucket), NVal = proplists:get_value(n_val, BucketProps), {ok, Sender} = riak_pipe_qcover_sup:start_qcover_fsm( @@ -147,7 +158,23 @@ queue_existing_pipe(Pipe, Bucket, Query, Timeout) -> [LKP, {Bucket, Query}, NVal]]), %% wait for cover to hit everything - erlang:link(Sender), + {RealTO, TOReason} = + try erlang:link(Sender) of + true -> + %% Sender was alive - wait as expected + {Timeout, timeout} + catch error:noproc -> + %% Sender finished early; it's always spawned locally, + %% so we'll get a noproc exit, instead of an exit signal + + %% messages had better already be in our mailbox, + %% don't wait any extra time for them + {0, + %% we'll have no idea what its failure was, unless it + %% sent us an error message + index_coverage_failure} + end, + receive {ReqId, done} -> %% this eoi will flow into the other pipe @@ -157,8 +184,8 @@ queue_existing_pipe(Pipe, Bucket, Query, Timeout) -> %% this destroy should not harm the other pipe riak_pipe:destroy(LKP), Error - after Timeout -> + after RealTO -> %% this destroy should not harm the other pipe riak_pipe:destroy(LKP), - {error, timeout} + {error, TOReason} end. 
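The erlang:link/1 guard introduced in queue_existing_pipe/4 above (and applied again to riak_kv_pipe_listkeys below) leans on a property of local links: when the caller is not trapping exits, linking to a process that has already terminated raises error:noproc rather than delivering an exit signal. A minimal sketch of the pattern, with illustrative names that are not part of this patch:

    %% Wait for a reply from Sender, but do not wait at all if Sender has
    %% already exited: a dead local process makes erlang:link/1 raise
    %% error:noproc, and anything it sent is already in our mailbox.
    wait_for_sender(Sender, Timeout) ->
        RealTO = try erlang:link(Sender) of
                     true -> Timeout    % still alive; wait as requested
                 catch
                     error:noproc -> 0  % already finished; drain mailbox only
                 end,
        receive
            {Sender, Reply} -> {ok, Reply}
        after RealTO ->
            {error, timeout}
        end.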
diff --git a/src/riak_kv_pipe_listkeys.erl b/src/riak_kv_pipe_listkeys.erl index a137d4ef61..4e00a649b5 100644 --- a/src/riak_kv_pipe_listkeys.erl +++ b/src/riak_kv_pipe_listkeys.erl @@ -73,7 +73,7 @@ process(Input, _Last, #state{p=Partition, fd=FittingDetails}=State) -> Filters = [], FilterVNodes = [] end, - ReqId = erlang:phash2(erlang:now()), % stolen from riak_client + ReqId = erlang:phash2({self(), os:timestamp()}), % stolen from riak_client riak_core_vnode_master:coverage( riak_kv_keys_fsm:req(Bucket, Filters), {Partition, node()}, @@ -133,6 +133,9 @@ done(_State) -> %% to trigger keylisting on the appropriate vnodes. The `eoi' %% message is sent to the pipe as soon as it is confirmed that %% all keylisting processes have started. +%% +%% Note that log/trace messages are sent to the sink of the +%% original pipe. It is expected that that sink is an `fsm' type. -spec queue_existing_pipe(riak_pipe:pipe(), bucket_or_filter(), timeout()) -> @@ -140,15 +143,17 @@ done(_State) -> queue_existing_pipe(Pipe, Bucket, Timeout) -> %% make our tiny pipe [{_Name, Head}|_] = Pipe#pipe.fittings, + Period = riak_kv_mrc_pipe:sink_sync_period(), {ok, LKP} = riak_pipe:exec([#fitting_spec{name=listkeys, module=?MODULE, nval=1}], [{sink, Head}, {trace, [error]}, - {log, {sink, Pipe#pipe.sink}}]), + {log, {sink, Pipe#pipe.sink}}, + {sink_type, {fsm, Period, infinity}}]), %% setup the cover operation - ReqId = erlang:phash2(erlang:now()), %% stolen from riak_client + ReqId = erlang:phash2({self(), os:timestamp()}), %% stolen from riak_client BucketProps = riak_core_bucket:get_bucket(Bucket), NVal = proplists:get_value(n_val, BucketProps), {ok, Sender} = riak_pipe_qcover_sup:start_qcover_fsm( @@ -156,7 +161,23 @@ queue_existing_pipe(Pipe, Bucket, Timeout) -> [LKP, Bucket, NVal]]), %% wait for cover to hit everything - erlang:link(Sender), + {RealTO, TOReason} = + try erlang:link(Sender) of + true -> + %% Sender was alive - wait as expected + {Timeout, timeout} + catch error:noproc -> + %% Sender finished early; it's always spawned locally, + %% so we'll get a noproc exit, instead of an exit signal + + %% messages had better already be in our mailbox, + %% don't wait any extra time for them + {0, + %% we'll have no idea what its failure was, unless it + %% sent us an error message + listkeys_coverage_failure} + end, + receive {ReqId, done} -> %% this eoi will flow into the other pipe @@ -166,8 +187,8 @@ queue_existing_pipe(Pipe, Bucket, Timeout) -> %% this destroy should not harm the other pipe riak_pipe:destroy(LKP), Error - after Timeout -> + after RealTO -> %% this destroy should not harm the other pipe riak_pipe:destroy(LKP), - {error, timeout} + {error, TOReason} end. diff --git a/src/riak_kv_put_core.erl b/src/riak_kv_put_core.erl index dd5ac60434..04c8ddc86a 100644 --- a/src/riak_kv_put_core.erl +++ b/src/riak_kv_put_core.erl @@ -21,7 +21,7 @@ %% ------------------------------------------------------------------- -module(riak_kv_put_core). -export([init/7, add_result/2, enough/1, response/1, - final/1]). + final/1, result_shortcode/1, result_idx/1]). -export_type([putcore/0, result/0, reply/0]). -type vput_result() :: any(). @@ -134,6 +134,16 @@ final(PutCore = #putcore{final_obj = FinalObj, {FinalObj, PutCore} end. +result_shortcode({w, _, _}) -> 1; +result_shortcode({dw, _, _}) -> 2; +result_shortcode({dw, _, _, _}) -> 2; +result_shortcode({fail, _, _}) -> -1; +result_shortcode(_) -> -2. + +result_idx({_, Idx, _}) -> Idx; +result_idx({_, Idx, _, _}) -> Idx; +result_idx(_) -> -1. 
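result_shortcode/1 and result_idx/1, newly exported above, compress a vnode reply into integers that are cheap to hand to a DTrace probe. A hypothetical session against the clauses just shown (the atoms reqid and obj stand in for a real request id and object):

    %% 1 = w (write accepted), 2 = dw (durable write), -1 = fail, -2 = other.
    1  = riak_kv_put_core:result_shortcode({w, 42, reqid}),
    2  = riak_kv_put_core:result_shortcode({dw, 42, reqid}),
    2  = riak_kv_put_core:result_shortcode({dw, 42, obj, reqid}),
    -1 = riak_kv_put_core:result_shortcode({fail, 42, reqid}),
    %% result_idx/1 extracts the partition index, or -1 when absent:
    42 = riak_kv_put_core:result_idx({dw, 42, obj, reqid}).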
+ %% ==================================================================== %% Internal functions %% ==================================================================== diff --git a/src/riak_kv_put_fsm.erl b/src/riak_kv_put_fsm.erl index d6a8128929..85653d92d9 100644 --- a/src/riak_kv_put_fsm.erl +++ b/src/riak_kv_put_fsm.erl @@ -44,7 +44,6 @@ waiting_remote_vnode/2, postcommit/2, finish/2]). - -type detail_info() :: timing. -type detail() :: true | false | @@ -94,9 +93,11 @@ put_usecs :: undefined | non_neg_integer(), timing = [] :: [{atom(), {non_neg_integer(), non_neg_integer(), non_neg_integer()}}], - reply % reply sent to client + reply, % reply sent to client + tracked_bucket=false :: boolean() %% track per-bucket stats }). +-include("riak_kv_dtrace.hrl"). -define(PARSE_INDEX_PRECOMMIT, {struct, [{<<"mod">>, <<"riak_index">>}, {<<"fun">>, <<"parse_object_hook">>}]}). -define(DEFAULT_TIMEOUT, 60000). @@ -147,9 +148,21 @@ test_link(From, Object, PutOptions, StateProps) -> %% @private init([From, RObj, Options]) -> + BKey = {Bucket, Key} = {riak_object:bucket(RObj), riak_object:key(RObj)}, StateData = add_timing(prepare, #state{from = From, robj = RObj, + bkey = BKey, options = Options}), + riak_core_dtrace:put_tag(io_lib:format("~p,~p", [Bucket, Key])), + case riak_kv_util:is_x_deleted(RObj) of + true -> + TombNum = 1, + TombStr = <<"tombstone">>; + false -> + TombNum = 0, + TombStr = <<>> + end, + ?DTRACE(?C_PUT_FSM_INIT, [TombNum], ["init", TombStr]), {ok, prepare, StateData, 0}; init({test, Args, StateProps}) -> %% Call normal init @@ -170,12 +183,13 @@ init({test, Args, StateProps}) -> %% @private prepare(timeout, StateData0 = #state{from = From, robj = RObj, + bkey = BKey, options = Options}) -> {ok,Ring} = riak_core_ring_manager:get_my_ring(), BucketProps = riak_core_bucket:get_bucket(riak_object:bucket(RObj), Ring), - BKey = {riak_object:bucket(RObj), riak_object:key(RObj)}, DocIdx = riak_core_util:chash_key(BKey), N = proplists:get_value(n_val,BucketProps), + StatTracked = proplists:get_value(stat_tracked, BucketProps, false), UpNodes = riak_core_node_watcher:nodes(riak_kv), Preflist2 = riak_core_apl:get_apl_ann(DocIdx, N, Ring, UpNodes), %% Check if this node is in the preference list so it can coordinate @@ -185,17 +199,24 @@ prepare(timeout, StateData0 = #state{from = From, robj = RObj, case {Preflist2, LocalPL =:= [] andalso Must == true} of {[], _} -> %% Empty preflist + ?DTRACE(?C_PUT_FSM_PREPARE, [-1], ["prepare",<<"all nodes down">>]), process_reply({error, all_nodes_down}, StateData0); {_, true} -> %% This node is not in the preference list %% forward on to the first node [{{_Idx, CoordNode},_Type}|_] = Preflist2, Timeout = get_option(timeout, Options, ?DEFAULT_TIMEOUT), + ?DTRACE(?C_PUT_FSM_PREPARE, [1], + ["prepare", atom2list(CoordNode)]), case rpc:call(CoordNode,riak_kv_put_fsm_sup,start_put_fsm,[CoordNode,[From,RObj,Options]],Timeout) of {ok, _Pid} -> + ?DTRACE(?C_PUT_FSM_PREPARE, [2], + ["prepare", atom2list(CoordNode)]), riak_kv_stat:update(coord_redir), {stop, normal, StateData0}; {_, Reason} -> % {error,_} or {badrpc,_} + ?DTRACE(?C_PUT_FSM_PREPARE, [-2], + ["prepare", dtrace_errstr(Reason)]), lager:error("Unable to forward put for ~p to ~p - ~p\n", [BKey, CoordNode, Reason]), process_reply({error, {coord_handoff_failed, Reason}}, StateData0) @@ -209,14 +230,19 @@ prepare(timeout, StateData0 = #state{from = From, robj = RObj, _ -> undefined end, + CoordPlNode = case CoordPLEntry of + undefined -> undefined; + {_Idx, Nd} -> atom2list(Nd) + end, %% This node is
in the preference list, continue StartTime = riak_core_util:moment(), StateData = StateData0#state{n = N, - bkey = BKey, bucket_props = BucketProps, coord_pl_entry = CoordPLEntry, preflist2 = Preflist2, - starttime = StartTime}, + starttime = StartTime, + tracked_bucket = StatTracked}, + ?DTRACE(?C_PUT_FSM_PREPARE, [0], ["prepare", CoordPlNode]), new_state_timeout(validate, StateData) end. @@ -283,6 +309,7 @@ validate(timeout, StateData0 = #state{from = {raw, ReqId, _Pid}, StateData2 = handle_options(Options, StateData1), StateData3 = apply_updates(StateData2), StateData = init_putcore(StateData3), + ?DTRACE(?C_PUT_FSM_VALIDATE, [N, W, PW, DW], []), case Precommit of [] -> % Nothing to run, spare the timing code execute(StateData); @@ -298,10 +325,13 @@ precommit(timeout, State = #state{precommit = [Hook | Rest], robj = RObj}) -> Result = decode_precommit(invoke_hook(Hook, RObj)), case Result of fail -> + ?DTRACE(?C_PUT_FSM_PRECOMMIT, [-1], []), process_reply({error, precommit_fail}, State); {fail, Reason} -> + ?DTRACE(?C_PUT_FSM_PRECOMMIT, [-1], [dtrace_errstr(Reason)]), process_reply({error, {precommit_fail, Reason}}, State); Result -> + ?DTRACE(?C_PUT_FSM_PRECOMMIT, [0], []), {next_state, precommit, State#state{robj = riak_object:apply_updates(Result), precommit = Rest}, 0} end. @@ -321,9 +351,10 @@ execute(State=#state{coord_pl_entry = CPL}) -> %% N.B. Not actually a state - here in the source to make reading the flow easier execute_local(StateData=#state{robj=RObj, req_id = ReqId, timeout=Timeout, bkey=BKey, - coord_pl_entry = {_Index, _Node} = CoordPLEntry, + coord_pl_entry = {_Index, Node} = CoordPLEntry, vnode_options=VnodeOptions, starttime = StartTime}) -> + ?DTRACE(?C_PUT_FSM_EXECUTE_LOCAL, [], [atom2list(Node)]), StateData1 = add_timing(execute_local, StateData), TRef = schedule_timeout(Timeout), riak_kv_vnode:coord_put(CoordPLEntry, BKey, RObj, ReqId, StartTime, VnodeOptions), @@ -334,22 +365,31 @@ execute_local(StateData=#state{robj=RObj, req_id = ReqId, %% @private waiting_local_vnode(request_timeout, StateData) -> + ?DTRACE(?C_PUT_FSM_WAITING_LOCAL_VNODE, [-1], []), process_reply({error,timeout}, StateData); waiting_local_vnode(Result, StateData = #state{putcore = PutCore}) -> UpdPutCore1 = riak_kv_put_core:add_result(Result, PutCore), case Result of - {fail, _Idx, _ReqId} -> + {fail, Idx, _ReqId} -> + ?DTRACE(?C_PUT_FSM_WAITING_LOCAL_VNODE, [-1], + [integer_to_list(Idx)]), %% Local vnode failure is enough to sink whole operation process_reply({error, local_put_failed}, StateData#state{putcore = UpdPutCore1}); - {w, _Idx, _ReqId} -> + {w, Idx, _ReqId} -> + ?DTRACE(?C_PUT_FSM_WAITING_LOCAL_VNODE, [1], + [integer_to_list(Idx)]), {next_state, waiting_local_vnode, StateData#state{putcore = UpdPutCore1}}; - {dw, _Idx, PutObj, _ReqId} -> + {dw, Idx, PutObj, _ReqId} -> %% Either returnbody is true or coord put merged with the existing %% object and bumped the vclock. Either way use the returned %% object for the remote vnode + ?DTRACE(?C_PUT_FSM_WAITING_LOCAL_VNODE, [2], + [integer_to_list(Idx)]), execute_remote(StateData#state{robj = PutObj, putcore = UpdPutCore1}); - {dw, _Idx, _ReqId} -> + {dw, Idx, _ReqId} -> %% Write succeeded without changes to vclock required and returnbody false + ?DTRACE(?C_PUT_FSM_WAITING_LOCAL_VNODE, [2], + [integer_to_list(Idx)]), execute_remote(StateData#state{putcore = UpdPutCore1}) end. 
@@ -366,6 +406,9 @@ execute_remote(StateData=#state{robj=RObj, req_id = ReqId, StateData1 = add_timing(execute_remote, StateData), Preflist = [IndexNode || {IndexNode, _Type} <- Preflist2, IndexNode /= CoordPLEntry], + Ps = [[atom2list(Nd), $,, integer_to_list(Idx)] || + {Idx, Nd} <- lists:sublist(Preflist, 4)], + ?DTRACE(?C_PUT_FSM_EXECUTE_REMOTE, [], [Ps]), riak_kv_vnode:put(Preflist, BKey, RObj, ReqId, StartTime, VnodeOptions), case riak_kv_put_core:enough(PutCore) of true -> @@ -378,8 +421,12 @@ execute_remote(StateData=#state{robj=RObj, req_id = ReqId, %% @private waiting_remote_vnode(request_timeout, StateData) -> + ?DTRACE(?C_PUT_FSM_WAITING_REMOTE_VNODE, [-1], []), process_reply({error,timeout}, StateData); waiting_remote_vnode(Result, StateData = #state{putcore = PutCore}) -> + ShortCode = riak_kv_put_core:result_shortcode(Result), + IdxStr = integer_to_list(riak_kv_put_core:result_idx(Result)), + ?DTRACE(?C_PUT_FSM_WAITING_REMOTE_VNODE, [ShortCode], [IdxStr]), UpdPutCore1 = riak_kv_put_core:add_result(Result, PutCore), case riak_kv_put_core:enough(UpdPutCore1) of true -> @@ -391,9 +438,11 @@ waiting_remote_vnode(Result, StateData = #state{putcore = PutCore}) -> %% @private postcommit(timeout, StateData = #state{postcommit = []}) -> + ?DTRACE(?C_PUT_FSM_POSTCOMMIT, [0], []), new_state_timeout(finish, StateData); postcommit(timeout, StateData = #state{postcommit = [Hook | Rest], putcore = PutCore}) -> + ?DTRACE(?C_PUT_FSM_POSTCOMMIT, [-2], []), %% Process the next hook - gives sys:get_status messages a chance if hooks %% take a long time. {ReplyObj, UpdPutCore} = riak_kv_put_core:final(PutCore), @@ -401,24 +450,35 @@ postcommit(timeout, StateData = #state{postcommit = [Hook | Rest], {next_state, postcommit, StateData#state{postcommit = Rest, putcore = UpdPutCore}, 0}; postcommit(request_timeout, StateData) -> % still process hooks even if request timed out + ?DTRACE(?C_PUT_FSM_POSTCOMMIT, [-3], []), {next_state, postcommit, StateData, 0}; postcommit(Reply, StateData = #state{putcore = PutCore}) -> + ShortCode = riak_kv_put_core:result_shortcode(Reply), + IdxStr = integer_to_list(riak_kv_put_core:result_idx(Reply)), + ?DTRACE(?C_PUT_FSM_POSTCOMMIT, [0, ShortCode], [IdxStr]), %% late responses - add to state. *Does not* recompute finalobj UpdPutCore = riak_kv_put_core:add_result(Reply, PutCore), {next_state, postcommit, StateData#state{putcore = UpdPutCore}, 0}. -finish(timeout, StateData = #state{timing = Timing, reply = Reply}) -> +finish(timeout, StateData = #state{timing = Timing, reply = Reply, + bkey = {Bucket, _Key}, + tracked_bucket = StatTracked}) -> case Reply of {error, _} -> + ?DTRACE(?C_PUT_FSM_FINISH, [-1], []), ok; _Ok -> %% TODO: Improve reporting of timing %% For now can add debug tracers to view the return from calc_timing - {Duration, _Stages} = calc_timing(Timing), - riak_kv_stat:update({put_fsm_time, Duration}) + {Duration, Stages} = riak_kv_fsm_timing:calc_timing(Timing), + riak_kv_stat:update({put_fsm_time, Bucket, Duration, Stages, StatTracked}), + ?DTRACE(?C_PUT_FSM_FINISH, [0, Duration], []) end, {stop, normal, StateData}; finish(Reply, StateData = #state{putcore = PutCore}) -> + ShortCode = riak_kv_put_core:result_shortcode(Reply), + IdxStr = integer_to_list(riak_kv_put_core:result_idx(Reply)), + ?DTRACE(?C_PUT_FSM_FINISH, [1, ShortCode], [IdxStr]), %% late responses - add to state. *Does not* recompute finalobj UpdPutCore = riak_kv_put_core:add_result(Reply, PutCore), {next_state, finish, StateData#state{putcore = UpdPutCore}, 0}. 
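The per-bucket timing reported by finish/2 above is recorded only when the bucket carries the stat_tracked property, which prepare/2 loads into the new tracked_bucket field. A sketch of enrolling a bucket via track_bucket/1, which this patch adds to riak_kv_stat further below (the bucket name is illustrative):

    %% Set the stat_tracked bucket property so put/get FSM timings for
    %% this bucket are reported individually as well as in aggregate.
    riak_kv_stat:track_bucket(<<"orders">>),
    %% ...and to stop tracking it again:
    riak_kv_stat:untrack_bucket(<<"orders">>).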
@@ -461,7 +521,9 @@ new_state_timeout(StateName, StateData) -> %% What to do once enough responses from vnodes have been received to reply process_reply(Reply, StateData = #state{postcommit = PostCommit, - putcore = PutCore}) -> + putcore = PutCore, + robj = RObj, + bkey = {Bucket, Key}}) -> StateData1 = client_reply(Reply, StateData), StateData2 = case PostCommit of [] -> @@ -475,10 +537,18 @@ process_reply(Reply, StateData = #state{postcommit = PostCommit, end, case Reply of ok -> + ?DTRACE(?C_PUT_FSM_PROCESS_REPLY, [0], []), new_state_timeout(postcommit, StateData2); {ok, _} -> + Values = riak_object:get_values(RObj), + %% TODO: more accurate sizing method + ApproxBytes = size(Bucket) + size(Key) + + lists:sum([size(V) || V <- Values]), + NumSibs = length(Values), + ?DTRACE(?C_PUT_FSM_PROCESS_REPLY, [1, ApproxBytes, NumSibs], []), new_state_timeout(postcommit, StateData2); _ -> + ?DTRACE(?C_PUT_FSM_PROCESS_REPLY, [-1], []), new_state_timeout(finish, StateData2) end. @@ -574,7 +644,7 @@ update_last_modified(RObj) -> %% which should serve the same purpose. It was possible to generate two %% objects with the same vclock on 0.14.2 if the same clientid was used in %% the same second. It can be revisited post-1.0.0. - Now = erlang:now(), + Now = os:timestamp(), NewMD = dict:store(?MD_VTAG, make_vtag(Now), dict:store(?MD_LASTMOD, Now, MD0)), riak_object:update_metadata(RObj, NewMD). @@ -614,18 +684,25 @@ invoke_hook(_, _, _, _) -> -spec decode_precommit(any()) -> fail | {fail, any()} | riak_object:riak_object(). decode_precommit({erlang, {Mod, Fun}, Result}) -> + %% TODO: For DTrace things, we will err on the side of taking the + %% time to format the error results into strings to pass to + %% the probes. If this ends up being too slow, then revisit. case Result of fail -> + ?DTRACE(?C_PUT_FSM_DECODE_PRECOMMIT, [-1], []), riak_kv_stat:update(precommit_fail), lager:debug("Pre-commit hook ~p:~p failed, no reason given", [Mod, Fun]), fail; {fail, Reason} -> + ?DTRACE(?C_PUT_FSM_DECODE_PRECOMMIT, [-2], [dtrace_errstr(Reason)]), riak_kv_stat:update(precommit_fail), lager:debug("Pre-commit hook ~p:~p failed with reason ~p", [Mod, Fun, Reason]), Result; {'EXIT', Mod, Fun, Class, Exception} -> + ?DTRACE(?C_PUT_FSM_DECODE_PRECOMMIT, [-3], + [dtrace_errstr({Mod, Fun, Class, Exception})]), riak_kv_stat:update(precommit_fail), lager:debug("Problem invoking pre-commit hook ~p:~p -> ~p:~p~n~p", [Mod,Fun,Class,Exception, erlang:get_stacktrace()]), @@ -633,7 +710,9 @@ decode_precommit({erlang, {Mod, Fun}, Result}) -> Obj -> try riak_object:ensure_robject(Obj) - catch _:_ -> + catch X:Y -> + ?DTRACE(?C_PUT_FSM_DECODE_PRECOMMIT, [-4], + [dtrace_errstr({Mod, Fun, X, Y})]), riak_kv_stat:update(precommit_fail), lager:debug("Problem invoking pre-commit hook ~p:~p," " invalid return ~p", @@ -645,11 +724,13 @@ decode_precommit({erlang, {Mod, Fun}, Result}) -> decode_precommit({js, JSName, Result}) -> case Result of {ok, <<"fail">>} -> + ?DTRACE(?C_PUT_FSM_DECODE_PRECOMMIT, [-5], []), riak_kv_stat:update(precommit_fail), lager:debug("Pre-commit hook ~p failed, no reason given", [JSName]), fail; {ok, [{<<"fail">>, Message}]} -> + ?DTRACE(?C_PUT_FSM_DECODE_PRECOMMIT, [-6],[dtrace_errstr(Message)]), riak_kv_stat:update(precommit_fail), lager:debug("Pre-commit hook ~p failed with reason ~p", [JSName, Message]), @@ -657,16 +738,19 @@ decode_precommit({js, JSName, Result}) -> {ok, Json} -> case catch riak_object:from_json(Json) of {'EXIT', _} -> + ?DTRACE(?C_PUT_FSM_DECODE_PRECOMMIT, [-7], []), {fail, {invalid_return, 
{JSName, Json}}}; Obj -> Obj end; {error, Error} -> riak_kv_stat:update(precommit_fail), + ?DTRACE(?C_PUT_FSM_DECODE_PRECOMMIT, [-7], [dtrace_errstr(Error)]), lager:debug("Problem invoking pre-commit hook: ~p", [Error]), fail end; decode_precommit({error, Reason}) -> + ?DTRACE(?C_PUT_FSM_DECODE_PRECOMMIT, [-8], [dtrace_errstr(Reason)]), riak_kv_stat:update(precommit_fail), lager:debug("Problem invoking pre-commit hook: ~p", [Reason]), {fail, Reason}. @@ -674,22 +758,28 @@ decode_precommit({error, Reason}) -> decode_postcommit({erlang, {M,F}, Res}) -> case Res of fail -> + ?DTRACE(?C_PUT_FSM_DECODE_POSTCOMMIT, [-1], []), riak_kv_stat:update(postcommit_fail), lager:debug("Post-commit hook ~p:~p failed, no reason given", [M, F]); {fail, Reason} -> + ?DTRACE(?C_PUT_FSM_DECODE_POSTCOMMIT, [-2],[dtrace_errstr(Reason)]), riak_kv_stat:update(postcommit_fail), lager:debug("Post-commit hook ~p:~p failed with reason ~p", [M, F, Reason]); {'EXIT', _, _, Class, Ex} -> + ?DTRACE(?C_PUT_FSM_DECODE_POSTCOMMIT, [-3], + [dtrace_errstr({M, F, Class, Ex})]), riak_kv_stat:update(postcommit_fail), Stack = erlang:get_stacktrace(), lager:debug("Problem invoking post-commit hook ~p:~p -> ~p:~p~n~p", [M, F, Class, Ex, Stack]), ok; - _ -> ok + _ -> + ok end; decode_postcommit({error, {invalid_hook_def, Def}}) -> + ?DTRACE(?C_PUT_FSM_DECODE_POSTCOMMIT, [-4], [dtrace_errstr(Def)]), riak_kv_stat:update(postcommit_fail), lager:debug("Invalid post-commit hook definition ~p", [Def]). @@ -739,7 +829,7 @@ client_info([], _StateData, Info) -> Info; client_info([timing | Rest], StateData = #state{timing = Timing}, Info) -> %% Duration is time from receiving request to responding - {ResponseUsecs, Stages} = calc_timing(Timing), + {ResponseUsecs, Stages} = riak_kv_fsm_timing:calc_timing(Timing), client_info(Rest, StateData, [{response_usecs, ResponseUsecs}, {stages, Stages} | Info]). @@ -749,29 +839,15 @@ default_details() -> %% Add timing information to the state add_timing(Stage, State = #state{timing = Timing}) -> - State#state{timing = [{Stage, os:timestamp()} | Timing]}. - -%% Calc timing information - stored as {Stage, StageStart} in reverse order. -%% ResponseUsecs is calculated as time from reply to start. -calc_timing([{Stage, Now} | Timing]) -> - ReplyNow = case Stage of - reply -> - Now; - _ -> - undefined - end, - calc_timing(Timing, Now, ReplyNow, []). - -%% Each timing stage has start time. -calc_timing([], StageEnd, ReplyNow, Stages) -> - %% StageEnd is prepare time - {timer:now_diff(ReplyNow, StageEnd), Stages}; -calc_timing([{reply, ReplyNow}|_]=Timing, StageEnd, undefined, Stages) -> - %% Populate ReplyNow then handle normally. - calc_timing(Timing, StageEnd, ReplyNow, Stages); -calc_timing([{Stage, StageStart} | Rest], StageEnd, ReplyNow, Stages) -> - calc_timing(Rest, StageStart, ReplyNow, - [{Stage, timer:now_diff(StageEnd, StageStart)} | Stages]). + State#state{timing = riak_kv_fsm_timing:add_timing(Stage, Timing)}. + +atom2list(A) when is_atom(A) -> + atom_to_list(A); +atom2list(P) when is_pid(P)-> + pid_to_list(P). % eunit tests + +dtrace_errstr(Term) -> + io_lib:format("~P", [Term, 12]). 
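dtrace_errstr/1 above formats with the ~P control sequence, which takes a maximum print depth (12 here) and elides anything deeper, so an arbitrarily large error reason cannot balloon a probe argument. A small self-contained illustration (exact output may vary slightly by OTP release):

    %% ~P consumes two arguments, the term and a depth limit:
    Msg = io_lib:format("~P", [{error, lists:seq(1, 100)}, 12]),
    io:format("~s~n", [Msg]).
    %% prints roughly: {error,[1,2,3,4,5,6,7,8,9,10|...]}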
%% =================================================================== %% EUnit tests diff --git a/src/riak_kv_reduce_phase.erl b/src/riak_kv_reduce_phase.erl deleted file mode 100644 index a8c1628642..0000000000 --- a/src/riak_kv_reduce_phase.erl +++ /dev/null @@ -1,112 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% riak_reduce_phase: manage the mechanics of a reduce phase of a MR job -%% -%% Copyright (c) 2007-2010 Basho Technologies, Inc. All Rights Reserved. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- - -%% @doc manage the mechanics of a reduce phase of a MR job - --module(riak_kv_reduce_phase). --include_lib("riak_kv_js_pools.hrl"). - --behaviour(luke_phase). - --export([init/1, handle_input/3, handle_input_done/1, handle_event/2, - handle_sync_event/3, handle_timeout/1, handle_info/2, terminate/2]). - --record(state, {qterm, reduced=[], new_inputs=[]}). - -%% @private -init([QTerm]) -> - {ok, #state{qterm=QTerm}}. - -handle_input(Inputs, #state{reduced=Reduced0, qterm=QTerm, new_inputs=New0}=State0, _Timeout) -> - New1 = New0 ++ Inputs, - if - length(New1) > 20 -> - case perform_reduce(QTerm, New1) of - {ok, Reduced} -> - {no_output, State0#state{reduced=Reduced0 ++ Reduced, new_inputs=[]}, 250}; - Error -> - {stop, Error, State0#state{reduced=[], new_inputs=[]}} - end; - true -> - {no_output, State0#state{new_inputs=New1}, 250} - end. - -handle_input_done(#state{qterm=QTerm, reduced=Reduced0, new_inputs=New0}=State) -> - case perform_reduce(QTerm, Reduced0 ++ New0) of - {ok, Reduced} -> - luke_phase:complete(), - {output, Reduced, State#state{reduced=Reduced}}; - Error -> - {stop, Error, State#state{reduced=[]}} - end. - -handle_timeout(#state{qterm=QTerm, reduced=Reduced0, new_inputs=New0}=State) -> - if - length(New0) > 0 -> - case perform_reduce(QTerm, New0) of - {ok, Reduced} -> - {no_output, State#state{reduced=Reduced0 ++ Reduced, new_inputs=[]}, 250}; - Error -> - {stop, Error, State#state{reduced=[], new_inputs=[]}} - end; - true -> - {no_output, State, 250} - end. - -handle_sync_event(_Event, _From, State) -> - {reply, ignored, State}. - -handle_event(_Event, State) -> - {no_output, State}. - -handle_info(_Info, State) -> - {no_output, State}. - -terminate(_Reason, _State) -> - ok. 
- -perform_reduce({Lang,{reduce,FunTerm,Arg,_Acc}}, - Reduced) -> - try - case {Lang, FunTerm} of - {erlang, {qfun,F}} -> - Value = F(Reduced,Arg), - {ok, Value}; - {erlang, {modfun,M,F}} -> - Value = M:F(Reduced,Arg), - {ok, Value}; - {javascript, _} -> - case riak_kv_js_manager:blocking_dispatch(?JSPOOL_REDUCE, {FunTerm, - [riak_kv_mapred_json:jsonify_not_found(R) || R <- Reduced], - Arg}, 25) of - {ok, Data} when is_list(Data) -> - Data1 = [riak_kv_mapred_json:dejsonify_not_found(Datum) || Datum <- Data], - {ok, Data1}; - {error, timeout} -> - throw({error, javascript_reduce_timeout}); - Error -> - throw(Error) - end - end - catch _:R -> - R - end. diff --git a/src/riak_kv_stat.erl b/src/riak_kv_stat.erl index a309e427ea..d94f62861c 100644 --- a/src/riak_kv_stat.erl +++ b/src/riak_kv_stat.erl @@ -20,752 +20,314 @@ %% %% ------------------------------------------------------------------- -%% @doc riak_kv_stat is a long-lived gen_server process for aggregating +%% @doc riak_kv_stat is a module for aggregating %% stats about the Riak node on which it is running. %% -%% Update each stat with the exported function update/1. Modify -%% the internal function update/3 to add storage for new stats. +%% Update each stat with the exported function update/1. Add +%% a new stat to the internal stats/0 func to register a new stat with +%% folsom. %% %% Get the latest aggregation of stats with the exported function -%% get_stats/0. Modify the internal function produce_stats/1 to -%% change how stats are represented. -%% -%% Riak will start riak_kv_stat for you, if you have specified -%% {riak_kv_stat, true} in your config .erlenv file. -%% -%% Current stats:
-%%<dl><dt> vnode_gets
-%%</dt><dd> Total number of gets handled by all vnodes on this node
-%% in the last minute.
-%%</dd><dd> update(vnode_get)
-%%
-%%</dd><dt> vnode_puts
-%%</dt><dd> Total number of puts handled by all vnodes on this node
-%% in the last minute.
-%%</dd><dd> update(vnode_put)
-%%
-%%</dd><dt> vnode_index_reads
-%%</dt><dd> The number of index reads handled by all vnodes on this node.
-%% Each query counts as an index read.
-%%</dd><dd> update(vnode_index_read)
-%%
-%%</dd><dt> vnode_index_writes
-%%</dt><dd> The number of batched writes handled by all vnodes on this node.
-%%</dd><dd> update({vnode_index_write, PostingsAdded, PostingsRemoved})
-%%
-%%</dd><dt> vnode_index_writes_postings
-%%</dt><dd> The number of postings written to all vnodes on this node.
-%%</dd><dd> update({vnode_index_write, PostingsAdded, PostingsRemoved})
-%%
-%%</dd><dt> vnode_index_deletes
-%%</dt><dd> The number of batched writes handled by all vnodes on this node.
-%%</dd><dd> update({vnode_index_delete, PostingsRemoved})
-%%
-%%</dd><dt> vnode_index_deletes_postings
-%%</dt><dd> The number of postings written to all vnodes on this node.
-%%</dd><dd> update({vnode_index_delete, PostingsRemoved})
-%%
-%%</dd><dt> node_gets
-%%</dt><dd> Number of gets coordinated by this node in the last
-%% minute.
-%%</dd><dd> update({get_fsm, _Bucket, Microseconds, NumSiblings, ObjSize})
-%%
-%%</dd><dt> node_get_fsm_siblings
-%%</dt><dd> Stats about number of siblings per object in the last minute.
-%%</dd><dd> Updated via node_gets.
-%%
-%%</dd><dt> node_get_fsm_objsize
-%%</dt><dd> Stats about object size over the last minute. The object
-%% size is an estimate calculated by summing the size of the
-%% bucket name, key name, and serialized vector clock, plus
-%% the value and serialized metadata of each sibling.
-%%</dd><dd> Updated via node_gets.
-%%
-%%</dd><dt> node_get_fsm_time_mean
-%%</dt><dd> Mean time, in microseconds, between when a riak_kv_get_fsm is
-%% started and when it sends a reply to the client, for the
-%% last minute.
-%%</dd><dd> update({get_fsm_time, Microseconds})
-%%
-%%</dd><dt> node_get_fsm_time_median
-%%</dt><dd> Median time, in microseconds, between when a riak_kv_get_fsm
-%% is started and when it sends a reply to the client, for
-%% the last minute.
-%%</dd><dd> update({get_fsm_time, Microseconds})
-%%
-%%</dd><dt> node_get_fsm_time_95
-%%</dt><dd> Response time, in microseconds, met or beaten by 95% of
-%% riak_kv_get_fsm executions.
-%%</dd><dd> update({get_fsm_time, Microseconds})
-%%
-%%</dd><dt> node_get_fsm_time_99
-%%</dt><dd> Response time, in microseconds, met or beaten by 99% of
-%% riak_kv_get_fsm executions.
-%%</dd><dd> update({get_fsm_time, Microseconds})
-%%
-%%</dd><dt> node_get_fsm_time_100
-%%</dt><dd> Response time, in microseconds, met or beaten by 100% of
-%% riak_kv_get_fsm executions.
-%%</dd><dd> update({get_fsm_time, Microseconds})
-%%
-%%</dd><dt> node_puts
-%%</dt><dd> Number of puts coordinated by this node in the last
-%% minute.
-%%</dd><dd> update({put_fsm_time, Microseconds})
-%%
-%%</dd><dt> node_put_fsm_time_mean
-%%</dt><dd> Mean time, in microseconds, between when a riak_kv_put_fsm is
-%% started and when it sends a reply to the client, for the
-%% last minute.
-%%</dd><dd> update({put_fsm_time, Microseconds})
-%%
-%%</dd><dt> node_put_fsm_time_median
-%%</dt><dd> Median time, in microseconds, between when a riak_kv_put_fsm
-%% is started and when it sends a reply to the client, for
-%% the last minute.
-%%</dd><dd> update({put_fsm_time, Microseconds})
-%%
-%%</dd><dt> node_put_fsm_time_95
-%%</dt><dd> Response time, in microseconds, met or beaten by 95% of
-%% riak_kv_put_fsm executions.
-%%</dd><dd> update({put_fsm_time, Microseconds})
-%%
-%%</dd><dt> node_put_fsm_time_99
-%%</dt><dd> Response time, in microseconds, met or beaten by 99% of
-%% riak_kv_put_fsm executions.
-%%</dd><dd> update({put_fsm_time, Microseconds})
-%%
-%%</dd><dt> node_put_fsm_time_100
-%%</dt><dd> Response time, in microseconds, met or beaten by 100% of
-%% riak_kv_put_fsm executions.
-%%</dd><dd> update({put_fsm_time, Microseconds})
-%%
-%%</dd><dt> cpu_nprocs
-%%</dt><dd> Value returned by {@link cpu_sup:nprocs/0}.
-%%
-%%</dd><dt> cpu_avg1
-%%</dt><dd> Value returned by {@link cpu_sup:avg1/0}.
-%%
-%%</dd><dt> cpu_avg5
-%%</dt><dd> Value returned by {@link cpu_sup:avg5/0}.
-%%
-%%</dd><dt> cpu_avg15
-%%</dt><dd> Value returned by {@link cpu_sup:avg15/0}.
-%%
-%%</dd><dt> mem_total
-%%</dt><dd> The first element of the tuple returned by
-%% {@link memsup:get_memory_data/0}.
-%%
-%%</dd><dt> mem_allocated
-%%</dt><dd> The second element of the tuple returned by
-%% {@link memsup:get_memory_data/0}.
-%%
-%%</dd><dt> disk
-%%</dt><dd> Value returned by {@link disksup:get_disk_data/0}.
-%%
-%%</dd><dt> pbc_connects_total
-%%</dt><dd> Total number of pb socket connections since start
-%%
-%%</dd><dt> pbc_active
-%%</dt><dd> Number of active pb socket connections
-%%
-%%</dd><dt> coord_redirs_total
-%%</dt><dd> Number of puts forwarded to be coordinated on a node
-%% in the preflist.
-%%
-%%</dd></dl>
-%% +%% get_stats/0. Or use folsom_metrics:get_metric_value/1, +%% or riak_core_stat_q:get_stats/1. %% + -module(riak_kv_stat). --behaviour(gen_server2). +-behaviour(gen_server). %% API --export([start_link/0, get_stats/0, get_stats/1, update/1]). +-export([start_link/0, get_stats/0, + update/1, register_stats/0, produce_stats/0, + leveldb_read_block_errors/0]). + +-export([track_bucket/1, untrack_bucket/1]). %% gen_server callbacks -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]). --record(state,{vnode_gets,vnode_puts,vnode_gets_total,vnode_puts_total, - vnode_index_reads, vnode_index_reads_total, - vnode_index_writes, vnode_index_writes_total, - vnode_index_writes_postings, vnode_index_writes_postings_total, - vnode_index_deletes, vnode_index_deletes_total, - vnode_index_deletes_postings, vnode_index_deletes_postings_total, - node_gets_total, node_puts_total, - node_get_fsm_siblings, node_get_fsm_objsize, - get_fsm_time,put_fsm_time, - pbc_connects,pbc_connects_total,pbc_active, - read_repairs, read_repairs_total, - coord_redirs, coord_redirs_total, mapper_count, - get_meter, put_meter, - precommit_fail, postcommit_fail, - legacy}). - - -%% @spec start_link() -> {ok,Pid} | ignore | {error,Error} -%% @doc Start the server. Also start the os_mon application, if it's -%% not already running. +-define(SERVER, ?MODULE). +-define(APP, riak_kv). + start_link() -> - case application:start(os_mon) of - ok -> ok; - {error, {already_started, os_mon}} -> ok - %% die if os_mon doesn't start - end, - gen_server2:start_link({local, ?MODULE}, ?MODULE, [], []). + gen_server:start_link({local, ?SERVER}, ?MODULE, [], []). + +register_stats() -> + [(catch folsom_metrics:delete_metric(Stat)) || Stat <- folsom_metrics:get_metrics(), + is_tuple(Stat), element(1, Stat) == ?APP], + [register_stat(stat_name(Name), Type) || {Name, Type} <- stats()], + riak_core_stat_cache:register_app(?APP, {?MODULE, produce_stats, []}). %% @spec get_stats() -> proplist() %% @doc Get the current aggregation of stats. get_stats() -> - get_stats(slide:moment()). + case riak_core_stat_cache:get_stats(?APP) of + {ok, Stats, _TS} -> + Stats; + Error -> Error + end. + +update(Arg) -> + gen_server:cast(?SERVER, {update, Arg}). -get_stats(Moment) -> - gen_server2:call(?MODULE, {get_stats, Moment}, infinity). +track_bucket(Bucket) when is_binary(Bucket) -> + riak_core_bucket:set_bucket(Bucket, [{stat_tracked, true}]). -%% @spec update(term()) -> ok -%% @doc Update the given stat. -update(Stat) -> - gen_server2:cast(?MODULE, {update, Stat, slide:moment()}). +untrack_bucket(Bucket) when is_binary(Bucket) -> + riak_core_bucket:set_bucket(Bucket, [{stat_tracked, false}]). + +%% gen_server -%% @private init([]) -> - process_flag(trap_exit, true), - remove_slide_private_dirs(), - case application:get_env(riak_kv, legacy_stats) of - {ok, false} -> - lager:warning("Overriding user-setting and using legacy stats. Set {legacy_stats,true} to remove this message."); - _ -> - ok - end, - legacy_init(). - --ifdef(not_defined). -make_meter() -> - {ok, M} = basho_metrics_nifs:meter_new(), - {meter, M}. - -make_histogram() -> - {ok, H} = basho_metrics_nifs:histogram_new(), - {histogram, H}. 
- -v2_init() -> - timer:send_interval(5000, tick), - {ok, #state{vnode_gets=make_meter(), - vnode_puts=make_meter(), - vnode_gets_total=0, - vnode_puts_total=0, - vnode_index_reads=make_meter(), - vnode_index_reads_total=0, - vnode_index_writes=make_meter(), - vnode_index_writes_total=0, - vnode_index_writes_postings=make_meter(), - vnode_index_writes_postings_total=0, - vnode_index_deletes=make_meter(), - vnode_index_deletes_total=0, - vnode_index_deletes_postings=make_meter(), - vnode_index_deletes_postings_total=0, - node_gets_total=0, - node_puts_total=0, - %% REMEMBER TO ADD LOGIC FOR node_get_fsm_siblings and node_get_fsm_objsize - get_fsm_time=make_histogram(), - put_fsm_time=make_histogram(), - pbc_connects=make_meter(), - pbc_connects_total=0, - pbc_active=0, - read_repairs=make_meter(), - read_repairs_total=0, - coord_redirs=make_meter(), - coord_redirs_total=0, - mapper_count=0, - get_meter=make_meter(), - put_meter=make_meter(), - precommit_fail=0, - postcommit_fail=0, - legacy=false}}. --endif. - -legacy_init() -> - {ok, #state{vnode_gets=spiraltime:fresh(), - vnode_puts=spiraltime:fresh(), - vnode_gets_total=0, - vnode_puts_total=0, - vnode_index_reads=spiraltime:fresh(), - vnode_index_reads_total=0, - vnode_index_writes=spiraltime:fresh(), - vnode_index_writes_total=0, - vnode_index_writes_postings=spiraltime:fresh(), - vnode_index_writes_postings_total=0, - vnode_index_deletes=spiraltime:fresh(), - vnode_index_deletes_total=0, - vnode_index_deletes_postings=spiraltime:fresh(), - vnode_index_deletes_postings_total=0, - node_gets_total=0, - node_puts_total=0, - node_get_fsm_siblings=slide:fresh(), - node_get_fsm_objsize=slide:fresh(), - get_fsm_time=slide:fresh(), - put_fsm_time=slide:fresh(), - pbc_connects=spiraltime:fresh(), - pbc_connects_total=0, - pbc_active=0, - read_repairs=spiraltime:fresh(), - read_repairs_total=0, - coord_redirs_total=0, - mapper_count=0, - precommit_fail=0, - postcommit_fail=0, - legacy=true}}. - -%% @private -handle_call({get_stats, Moment}, _From, State) -> - {reply, produce_stats(State, Moment), State}; -handle_call(_Request, _From, State) -> - Reply = ok, - {reply, Reply, State}. - -%% @private -handle_cast({update, Stat, Moment}, State) -> - {noreply, update(Stat, Moment, State, State#state.legacy)}; -handle_cast(_Msg, State) -> - {noreply, State}. + register_stats(), + {ok, ok}. + +handle_call(_Req, _From, State) -> + {reply, ok, State}. -%% @private -handle_info(tick, State) -> - tick(#state.get_meter, State), - tick(#state.put_meter, State), - tick(#state.vnode_gets, State), - tick(#state.vnode_puts, State), - tick(#state.vnode_index_reads, State), - tick(#state.vnode_index_writes, State), - tick(#state.vnode_index_writes_postings, State), - tick(#state.vnode_index_deletes, State), - tick(#state.vnode_index_deletes_postings, State), - tick(#state.pbc_connects, State), - tick(#state.read_repairs, State), - tick(#state.coord_redirs, State), +handle_cast({update, Arg}, State) -> + do_update(Arg), {noreply, State}; +handle_cast(_Req, State) -> + {noreply, State}. + handle_info(_Info, State) -> {noreply, State}. -%% @private terminate(_Reason, _State) -> - remove_slide_private_dirs(), ok. -%% @private code_change(_OldVsn, State, _Extra) -> {ok, State}. 
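Editor's note: a rough sketch (not part of this patch) of how the new stat flow is exercised end to end. `example_stat_roundtrip/0' is a hypothetical caller; it assumes register_stats/0 has run and folsom is started. Updates are cast through the gen_server into do_update/1 (added later in this patch); reads can hit the riak_core stat cache or folsom directly.

example_stat_roundtrip() ->
    %% fire-and-forget update, handled by a do_update/1 clause
    ok = riak_kv_stat:update({vnode_get, 0, 150}),
    %% read a single metric straight from folsom (bypasses the cache)
    Spiral = folsom_metrics:get_metric_value({riak_kv, vnode, gets}),
    %% or fetch the whole cached aggregation
    Stats = riak_kv_stat:get_stats(),
    {Spiral, Stats}.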
-%%-------------------------------------------------------------------- -%%% Internal functions -%%-------------------------------------------------------------------- - -update(Stat, Moment, State, true) -> - update(Stat, Moment, State); -update(Stat, Moment, State, false) -> - update1(Stat, Moment, State). - -%% @spec update(Stat::term(), integer(), state()) -> state() -%% @doc Update the given stat in State, returning a new State. -update(vnode_get, Moment, State=#state{vnode_gets_total=VGT}) -> - spiral_incr(#state.vnode_gets, Moment, State#state{vnode_gets_total=VGT+1}); -update(vnode_put, Moment, State=#state{vnode_puts_total=VPT}) -> - spiral_incr(#state.vnode_puts, Moment, State#state{vnode_puts_total=VPT+1}); -update(vnode_index_read, Moment, State=#state{vnode_index_reads_total=VPT}) -> - spiral_incr(#state.vnode_index_reads, Moment, State#state{vnode_index_reads_total=VPT+1}); -update({vnode_index_write, PostingsAdded, PostingsRemoved}, Moment, State=#state{vnode_index_writes_total=VIW, - vnode_index_writes_postings_total=VIWP, - vnode_index_deletes_postings_total=VIDP}) -> - NewState1 = spiral_incr(#state.vnode_index_writes, Moment, State#state{vnode_index_writes_total=VIW+1}), - NewState2 = spiral_incr(#state.vnode_index_writes_postings, PostingsAdded, Moment, NewState1#state{vnode_index_writes_postings_total=VIWP+PostingsAdded}), - NewState3 = spiral_incr(#state.vnode_index_deletes_postings, PostingsRemoved, Moment, NewState2#state{vnode_index_deletes_postings_total=VIDP+PostingsRemoved}), - NewState3; -update({vnode_index_delete, Postings}, Moment, State=#state{vnode_index_deletes_total=VID, vnode_index_deletes_postings_total=VIDP}) -> - NewState = spiral_incr(#state.vnode_index_deletes, Moment, State#state{vnode_index_deletes_total=VID+1}), - spiral_incr(#state.vnode_index_deletes_postings, Postings, Moment, NewState#state{vnode_index_deletes_postings_total=VIDP+Postings}); -update({get_fsm, _Bucket, Microsecs, undefined, undefined}, Moment, State) -> - NGT = State#state.node_gets_total, - NewState = State#state { node_gets_total=NGT+1 }, - slide_incr(#state.get_fsm_time, Microsecs, Moment, NewState); -update({get_fsm, _Bucket, Microsecs, NumSiblings, ObjSize}, Moment, State) -> - NGT = State#state.node_gets_total, - NewState1 = State#state { node_gets_total=NGT+1 }, - NewState2 = slide_incr(#state.get_fsm_time, Microsecs, Moment, NewState1), - NewState3 = slide_incr(#state.node_get_fsm_siblings, NumSiblings, Moment, NewState2), - NewState4 = slide_incr(#state.node_get_fsm_objsize, ObjSize, Moment, NewState3), - NewState4; -update({get_fsm_time, Microsecs}, Moment, State) -> - update({get_fsm, undefined, Microsecs, undefined, undefined}, Moment, State); -update({put_fsm_time, Microsecs}, Moment, State=#state{node_puts_total=NPT}) -> - slide_incr(#state.put_fsm_time, Microsecs, Moment, State#state{node_puts_total=NPT+1}); -update(pbc_connect, Moment, State=#state{pbc_connects_total=NCT, pbc_active=Active}) -> - spiral_incr(#state.pbc_connects, Moment, State#state{pbc_connects_total=NCT+1, - pbc_active=Active+1}); -update(pbc_disconnect, _Moment, State=#state{pbc_active=Active}) -> - State#state{pbc_active=decrzero(Active)}; -update(read_repairs, Moment, State=#state{read_repairs_total=RRT}) -> - spiral_incr(#state.read_repairs, Moment, State#state{read_repairs_total=RRT+1}); -update(coord_redir, _Moment, State=#state{coord_redirs_total=CRT}) -> - State#state{coord_redirs_total=CRT+1}; -update(mapper_start, _Moment, State=#state{mapper_count=Count0}) -> - 
State#state{mapper_count=Count0 + 1}; -update(mapper_end, _Moment, State=#state{mapper_count=Count0}) -> - State#state{mapper_count=decrzero(Count0)}; -update(precommit_fail, _Moment, State=#state{precommit_fail=Count0}) -> - State#state{precommit_fail=Count0+1}; -update(postcommit_fail, _Moment, State=#state{postcommit_fail=Count0}) -> - State#state{postcommit_fail=Count0+1}; -update(_, _, State) -> - State. - -tick(Field, State) -> - basho_metrics_nifs:meter_tick(element(2, element(Field, State))). - -update_metric(Field, Value, State) when is_integer(Value) -> - case element(Field, State) of - {meter, M} -> - basho_metrics_nifs:meter_update(M, Value); - {histogram, H} -> - basho_metrics_nifs:histogram_update(H, Value) - end, - State; -update_metric(Field, Value, State) -> - lager:error("Ignoring non-integer stats update for field ~p, value ~p", [Field, Value]), - State. - -%% @spec update(Stat::term(), integer(), state()) -> state() -%% @doc Update the given stat in State, returning a new State. -update1(vnode_get, _, State) -> - update_metric(#state.vnode_gets, 1, State); -update1(vnode_put, _, State) -> - update_metric(#state.vnode_puts, 1, State); -update1(vnode_index_read, _, State) -> - update_metric(#state.vnode_index_reads, 1, State); -update1({vnode_index_write, PostingsAdded, PostingsRemoved}, _, State) -> - State1 = update_metric(#state.vnode_index_writes, 1, State), - State2 = update_metric(#state.vnode_index_writes_postings, PostingsAdded, State1), - State3 = update_metric(#state.vnode_index_deletes_postings, PostingsRemoved, State2), - State3; -update1({vnode_index_delete, PostingsRemoved}, _, State) -> - State1 = update_metric(#state.vnode_index_deletes, 1, State), - State2 = update_metric(#state.vnode_index_deletes_postings, PostingsRemoved, State1), - State2; -update1({get_fsm, _Bucket, Microsecs, _NumSiblings, _ObjSize}, Moment, State) -> - update1({get_fsm_time, Microsecs}, Moment, State); -update1({get_fsm_time, Microsecs}, _, State) -> - update_metric(#state.get_meter, 1, - update_metric(#state.get_fsm_time, Microsecs, State)); -update1({put_fsm_time, Microsecs}, _, State) -> - update_metric(#state.put_meter, 1, - update_metric(#state.put_fsm_time, Microsecs, State)); -update1(pbc_connect, _, State=#state{pbc_active=Active}) -> - update_metric(#state.pbc_connects, 1, State#state{pbc_active=Active+1}); -update1(pbc_disconnect, _, State=#state{pbc_active=Active}) -> - State#state{pbc_active=decrzero(Active)}; -update1(read_repairs, _, State) -> - update_metric(#state.read_repairs, 1, State); -update1(coord_redir, _, State) -> - update_metric(#state.coord_redirs, 1, State); -update1(mapper_start, _Moment, State=#state{mapper_count=Count0}) -> - State#state{mapper_count=Count0+1}; -update1(mapper_end, _Moment, State=#state{mapper_count=Count0}) -> - State#state{mapper_count=decrzero(Count0)}; -update1(precommit_fail, _Moment, State=#state{precommit_fail=Count0}) -> - State#state{precommit_fail=Count0+1}; -update1(postcommit_fail, _Moment, State=#state{postcommit_fail=Count0}) -> - State#state{postcommit_fail=Count0+1}; -update1(_, _, State) -> - State. - -%% @doc decrement down to zero - do not go negative -decrzero(0) -> - 0; -decrzero(N) -> - N-1. - -%% @spec spiral_incr(integer(), integer(), state()) -> state() -%% @doc Increment the value of a spiraltime structure at a given -%% position of the State tuple. -spiral_incr(Elt, Moment, State) -> - setelement(Elt, State, - spiraltime:incr(1, Moment, element(Elt, State))). 
- -%% @spec spiral_incr(integer(), integer(), integer(), state()) -> state() -%% @doc Increment the value of a spiraltime structure at a given -%% position of the State tuple. -spiral_incr(Elt, Amount, Moment, State) -> - setelement(Elt, State, - spiraltime:incr(Amount, Moment, element(Elt, State))). - -%% @spec slide_incr(integer(), term(), integer(), state()) -> state() -%% @doc Update a slide structure at a given position in the -%% STate tuple. -slide_incr(Elt, Reading, Moment, State) -> - setelement(Elt, State, - slide:update(element(Elt, State), Reading, Moment)). - -%% @spec produce_stats(state(), integer()) -> proplist() -%% @doc Produce a proplist-formatted view of the current aggregation -%% of stats. -produce_stats(State, Moment) -> - lists:append( - [vnode_stats(Moment, State), - node_stats(Moment, State), - cpu_stats(), - mem_stats(), - disk_stats(), - system_stats(), - ring_stats(), - config_stats(), - pbc_stats(Moment, State), - app_stats(), - mapper_stats(State), - memory_stats() - ]). - -%% @spec spiral_minute(integer(), integer(), state()) -> integer() -%% @doc Get the count of events in the last minute from the spiraltime -%% structure at the given element of the state tuple. -spiral_minute(_Moment, Elt, State) -> - {_,Count} = spiraltime:rep_minute(element(Elt, State)), - Count. - -%% @spec slide_minute(integer(), integer(), state()) -> -%% {Count::integer(), Mean::ustat(), -%% {Median::ustat(), NinetyFive::ustat(), -%% NinetyNine::ustat(), Max::ustat()}} -%% @type ustat() = undefined | number() -%% @doc Get the Count of readings, the Mean of those readings, and the -%% Median, 95th percentile, 99th percentile, and Maximum readings -%% for the last minute from the slide structure at the given -%% element of the state tuple. -%% If Count is 0, then all other elements will be the atom -%% 'undefined'. -slide_minute(Moment, Elt, State, Min, Max, Bins, RoundingMode) -> - {Count, Mean, Nines} = slide:mean_and_nines(element(Elt, State), Moment, Min, Max, Bins, RoundingMode), - {Count, Mean, Nines}. - -metric_stats({meter, M}) -> - basho_metrics_nifs:meter_stats(M); -metric_stats({histogram, H}) -> - basho_metrics_nifs:histogram_stats(H). - -meter_minute(Stats) -> - trunc(proplists:get_value(one, Stats)). - -%% @spec vnode_stats(integer(), state()) -> proplist() -%% @doc Get the vnode-sum stats proplist. 
-vnode_stats(Moment, State=#state{legacy=true}) -> - lists:append( - [{F, spiral_minute(Moment, Elt, State)} - || {F, Elt} <- [{vnode_gets, #state.vnode_gets}, - {vnode_puts, #state.vnode_puts}, - {vnode_index_reads, #state.vnode_index_reads}, - {vnode_index_writes, #state.vnode_index_writes}, - {vnode_index_writes_postings, #state.vnode_index_writes_postings}, - {vnode_index_deletes, #state.vnode_index_deletes}, - {vnode_index_deletes_postings, #state.vnode_index_deletes_postings}, - {read_repairs,#state.read_repairs}]], - [{vnode_gets_total, State#state.vnode_gets_total}, - {vnode_puts_total, State#state.vnode_puts_total}, - {vnode_index_reads_total, State#state.vnode_index_reads_total}, - {vnode_index_writes_total, State#state.vnode_index_writes_total}, - {vnode_index_writes_postings_total, State#state.vnode_index_writes_postings_total}, - {vnode_index_deletes_total, State#state.vnode_index_deletes_total}, - {vnode_index_deletes_postings_total, State#state.vnode_index_deletes_postings_total}]); -vnode_stats(_, State=#state{legacy=false}) -> - VG = metric_stats(State#state.vnode_gets), - VP = metric_stats(State#state.vnode_puts), - VIR = metric_stats(State#state.vnode_index_reads), - VIW = metric_stats(State#state.vnode_index_writes), - VIWP = metric_stats(State#state.vnode_index_writes_postings), - VID = metric_stats(State#state.vnode_index_deletes), - VIDP = metric_stats(State#state.vnode_index_deletes_postings), - RR = metric_stats(State#state.read_repairs), - CR = metric_stats(State#state.coord_redirs), - [{vnode_gets, meter_minute(VG)}, - {vnode_puts, meter_minute(VP)}, - {vnode_index_reads, meter_minute(VIR)}, - {vnode_index_writes, meter_minute(VIW)}, - {vnode_index_writes_postings, meter_minute(VIWP)}, - {vnode_index_deletes, meter_minute(VID)}, - {vnode_index_deletes_postings, meter_minute(VIDP)}, - {read_repairs, meter_minute(RR)}, - {coord_redirs, meter_minute(CR)}, - {vnode_gets_total, proplists:get_value(count, VG)}, - {vnode_puts_total, proplists:get_value(count, VP)}, - {vnode_index_reads_total, proplists:get_value(count, VIR)}, - {vnode_index_writes_total, proplists:get_value(count, VIW)}, - {vnode_index_writes_postings_total, proplists:get_value(count, VIWP)}, - {vnode_index_deletes_total, proplists:get_value(count, VID)}, - {vnode_index_deletes_postings_total, proplists:get_value(count, VIDP)}]. - -%% @spec node_stats(integer(), state()) -> proplist() -%% @doc Get the node stats proplist. 
-node_stats(Moment, State=#state{node_gets_total=NGT, - node_puts_total=NPT, - read_repairs_total=RRT, - coord_redirs_total=CRT, - precommit_fail=PreF, - postcommit_fail=PostF, - legacy=true}) -> - {Gets, GetMean, {GetMedian, GetNF, GetNN, GetH}} = - slide_minute(Moment, #state.get_fsm_time, State, 0, 5000000, 20000, down), - {Puts, PutMean, {PutMedian, PutNF, PutNN, PutH}} = - slide_minute(Moment, #state.put_fsm_time, State, 0, 5000000, 20000, down), - {_Siblings, SiblingsMean, {SiblingsMedian, SiblingsNF, SiblingsNN, SiblingsH}} = - slide_minute(Moment, #state.node_get_fsm_siblings, State, 0, 1000, 1000, up), - {_ObjSize, ObjSizeMean, {ObjSizeMedian, ObjSizeNF, ObjSizeNN, ObjSizeH}} = - slide_minute(Moment, #state.node_get_fsm_objsize, State, 0, 16 * 1024 * 1024, 16 * 1024, down), - [{node_gets, Gets}, - {node_gets_total, NGT}, - {node_get_fsm_time_mean, GetMean}, - {node_get_fsm_time_median, GetMedian}, - {node_get_fsm_time_95, GetNF}, - {node_get_fsm_time_99, GetNN}, - {node_get_fsm_time_100, GetH}, - {node_puts, Puts}, - {node_puts_total, NPT}, - {node_put_fsm_time_mean, PutMean}, - {node_put_fsm_time_median, PutMedian}, - {node_put_fsm_time_95, PutNF}, - {node_put_fsm_time_99, PutNN}, - {node_put_fsm_time_100, PutH}, - {node_get_fsm_siblings_mean, SiblingsMean}, - {node_get_fsm_siblings_median, SiblingsMedian}, - {node_get_fsm_siblings_95, SiblingsNF}, - {node_get_fsm_siblings_99, SiblingsNN}, - {node_get_fsm_siblings_100, SiblingsH}, - {node_get_fsm_objsize_mean, ObjSizeMean}, - {node_get_fsm_objsize_median, ObjSizeMedian}, - {node_get_fsm_objsize_95, ObjSizeNF}, - {node_get_fsm_objsize_99, ObjSizeNN}, - {node_get_fsm_objsize_100, ObjSizeH}, - {read_repairs_total, RRT}, - {coord_redirs_total, CRT}, - {precommit_fail, PreF}, - {postcommit_fail, PostF}]; -node_stats(_, State=#state{legacy=false}) -> - PutInfo = metric_stats(State#state.put_fsm_time), - GetInfo = metric_stats(State#state.get_fsm_time), - RRInfo = metric_stats(State#state.read_repairs), - CRInfo = metric_stats(State#state.coord_redirs), - NodeGets = meter_minute(metric_stats(State#state.get_meter)), - NodePuts = meter_minute(metric_stats(State#state.put_meter)), - PreF = State#state.precommit_fail, - PostF = State#state.postcommit_fail, - [{node_gets, NodeGets}, - {node_gets_total, proplists:get_value(count, GetInfo)}, - {node_get_fsm_time_mean, proplists:get_value(mean, GetInfo)}, - {node_get_fsm_time_median, proplists:get_value(p50, GetInfo)}, - {node_get_fsm_time_95, proplists:get_value(p95, GetInfo)}, - {node_get_fsm_time_99, proplists:get_value(p99, GetInfo)}, - {node_get_fsm_time_100, proplists:get_value(max, GetInfo)}, - {node_puts, NodePuts}, - {node_puts_total, proplists:get_value(count, PutInfo)}, - {node_put_fsm_time_mean, proplists:get_value(mean, PutInfo)}, - {node_put_fsm_time_median, proplists:get_value(p50, PutInfo)}, - {node_put_fsm_time_95, proplists:get_value(p95, PutInfo)}, - {node_put_fsm_time_99, proplists:get_value(p99, PutInfo)}, - {node_put_fsm_time_100, proplists:get_value(max, PutInfo)}, - {read_repairs_total, proplists:get_value(count, RRInfo)}, - {coord_redirs_total, proplists:get_value(count, CRInfo)}, - {precommit_fail, PreF}, - {postcommit_fail, PostF}]. - - -%% @spec cpu_stats() -> proplist() -%% @doc Get stats on the cpu, as given by the cpu_sup module -%% of the os_mon application. -cpu_stats() -> - [{cpu_nprocs, cpu_sup:nprocs()}, - {cpu_avg1, cpu_sup:avg1()}, - {cpu_avg5, cpu_sup:avg5()}, - {cpu_avg15, cpu_sup:avg15()}]. 
- -%% @spec mem_stats() -> proplist() -%% @doc Get stats on the memory, as given by the memsup module -%% of the os_mon application. -mem_stats() -> - {Total, Alloc, _} = memsup:get_memory_data(), - [{mem_total, Total}, - {mem_allocated, Alloc}]. - -%% @spec disk_stats() -> proplist() -%% @doc Get stats on the disk, as given by the disksup module -%% of the os_mon application. -disk_stats() -> - [{disk, disksup:get_disk_data()}]. - -system_stats() -> - [{nodename, node()}, - {connected_nodes, nodes()}, - {sys_driver_version, list_to_binary(erlang:system_info(driver_version))}, - {sys_global_heaps_size, erlang:system_info(global_heaps_size)}, - {sys_heap_type, erlang:system_info(heap_type)}, - {sys_logical_processors, erlang:system_info(logical_processors)}, - {sys_otp_release, list_to_binary(erlang:system_info(otp_release))}, - {sys_process_count, erlang:system_info(process_count)}, - {sys_smp_support, erlang:system_info(smp_support)}, - {sys_system_version, list_to_binary(string:strip(erlang:system_info(system_version), right, $\n))}, - {sys_system_architecture, list_to_binary(erlang:system_info(system_architecture))}, - {sys_threads_enabled, erlang:system_info(threads)}, - {sys_thread_pool_size, erlang:system_info(thread_pool_size)}, - {sys_wordsize, erlang:system_info(wordsize)}]. - -app_stats() -> - [{list_to_atom(atom_to_list(A) ++ "_version"), list_to_binary(V)} - || {A,_,V} <- application:which_applications()]. - -memory_stats() -> - [{list_to_atom("memory_" ++ atom_to_list(K)), V} || {K,V} <- erlang:memory()]. - -ring_stats() -> +%% @doc Update the given stat +do_update({vnode_get, Idx, USecs}) -> + folsom_metrics:notify_existing_metric({?APP, vnode, gets}, 1, spiral), + create_or_update({?APP, vnode, gets, time}, USecs, histogram), + do_per_index(gets, Idx, USecs); +do_update({vnode_put, Idx, USecs}) -> + folsom_metrics:notify_existing_metric({?APP, vnode, puts}, 1, spiral), + create_or_update({?APP, vnode, puts, time}, USecs, histogram), + do_per_index(puts, Idx, USecs); +do_update(vnode_index_read) -> + folsom_metrics:notify_existing_metric({?APP, vnode, index, reads}, 1, spiral); +do_update({vnode_index_write, PostingsAdded, PostingsRemoved}) -> + folsom_metrics:notify_existing_metric({?APP, vnode, index, writes}, 1, spiral), + folsom_metrics:notify_existing_metric({?APP, vnode, index, writes, postings}, PostingsAdded, spiral), + folsom_metrics:notify_existing_metric({?APP, vnode, index, deletes, postings}, PostingsRemoved, spiral); +do_update({vnode_index_delete, Postings}) -> + folsom_metrics:notify_existing_metric({?APP, vnode, index, deletes}, Postings, spiral), + folsom_metrics:notify_existing_metric({?APP, vnode, index, deletes, postings}, Postings, spiral); +do_update({get_fsm, Bucket, Microsecs, Stages, undefined, undefined, PerBucket}) -> + folsom_metrics:notify_existing_metric({?APP, node, gets}, 1, spiral), + folsom_metrics:notify_existing_metric({?APP, node, gets, time}, Microsecs, histogram), + do_stages([?APP, node, gets, time], Stages), + do_get_bucket(PerBucket, {Bucket, Microsecs, Stages, undefined, undefined}); +do_update({get_fsm, Bucket, Microsecs, Stages, NumSiblings, ObjSize, PerBucket}) -> + folsom_metrics:notify_existing_metric({?APP, node, gets}, 1, spiral), + folsom_metrics:notify_existing_metric({?APP, node, gets, time}, Microsecs, histogram), + folsom_metrics:notify_existing_metric({?APP, node, gets, siblings}, NumSiblings, histogram), + folsom_metrics:notify_existing_metric({?APP, node, gets, objsize}, ObjSize, histogram), + do_stages([?APP, node, 
gets, time], Stages),
+    do_get_bucket(PerBucket, {Bucket, Microsecs, Stages, NumSiblings, ObjSize});
+do_update({put_fsm_time, Bucket, Microsecs, Stages, PerBucket}) ->
+    folsom_metrics:notify_existing_metric({?APP, node, puts}, 1, spiral),
+    folsom_metrics:notify_existing_metric({?APP, node, puts, time}, Microsecs, histogram),
+    do_stages([?APP, node, puts, time], Stages),
+    do_put_bucket(PerBucket, {Bucket, Microsecs, Stages});
+do_update({read_repairs, Indices, Preflist}) ->
+    folsom_metrics:notify_existing_metric({?APP, node, gets, read_repairs}, 1, spiral),
+    do_repairs(Indices, Preflist);
+do_update(coord_redir) ->
+    folsom_metrics:notify_existing_metric({?APP, node, puts, coord_redirs}, {inc, 1}, counter);
+do_update(mapper_start) ->
+    folsom_metrics:notify_existing_metric({?APP, mapper_count}, {inc, 1}, counter);
+do_update(mapper_end) ->
+    folsom_metrics:notify_existing_metric({?APP, mapper_count}, {dec, 1}, counter);
+do_update(precommit_fail) ->
+    folsom_metrics:notify_existing_metric({?APP, precommit_fail}, {inc, 1}, counter);
+do_update(postcommit_fail) ->
+    folsom_metrics:notify_existing_metric({?APP, postcommit_fail}, {inc, 1}, counter).
+
+%% private
+%% Per-index stats (by op)
+do_per_index(Op, Idx, USecs) ->
+    IdxAtom = list_to_atom(integer_to_list(Idx)),
+    create_or_update({?APP, vnode, Op, IdxAtom}, 1, spiral),
+    create_or_update({?APP, vnode, Op, time, IdxAtom}, USecs, histogram).
+
+%% per-bucket get_fsm stats
+do_get_bucket(false, _) ->
+    ok;
+do_get_bucket(true, {Bucket, Microsecs, Stages, NumSiblings, ObjSize}=Args) ->
+    case (catch folsom_metrics:notify_existing_metric({?APP, node, gets, Bucket}, 1, spiral)) of
+        ok ->
+            [folsom_metrics:notify_existing_metric({?APP, node, gets, Dimension, Bucket}, Arg, histogram)
+             || {Dimension, Arg} <- [{time, Microsecs},
+                                     {siblings, NumSiblings},
+                                     {objsize, ObjSize}], Arg /= undefined],
+            do_stages([?APP, node, gets, time, Bucket], Stages);
+        {'EXIT', _} ->
+            folsom_metrics:new_spiral({?APP, node, gets, Bucket}),
+            [register_stat({?APP, node, gets, Dimension, Bucket}, histogram) || Dimension <- [time,
+                                                                                              siblings,
+                                                                                              objsize]],
+            do_get_bucket(true, Args)
+    end.
+
+%% per-bucket put_fsm stats
+do_put_bucket(false, _) ->
+    ok;
+do_put_bucket(true, {Bucket, Microsecs, Stages}=Args) ->
+    case (catch folsom_metrics:notify_existing_metric({?APP, node, puts, Bucket}, 1, spiral)) of
+        ok ->
+            folsom_metrics:notify_existing_metric({?APP, node, puts, time, Bucket}, Microsecs, histogram),
+            do_stages([?APP, node, puts, time, Bucket], Stages);
+        {'EXIT', _} ->
+            register_stat({?APP, node, puts, Bucket}, spiral),
+            register_stat({?APP, node, puts, time, Bucket}, histogram),
+            do_put_bucket(true, Args)
+    end.
+
+%% Path is a list that provides a conceptual path to a stat.
+%% folsom uses the tuple as a flat name,
+%% but some ets query magic means we can get stats by APP, Stat, DimensionX;
+%% Path, then, is a list like [?APP, StatName].
+%% Both the get and put fsm provide a list of {State, Microseconds}
+%% for stats.
+%% Use the state to append to the stat "path", creating a further dimension on the stat.
+do_stages(_Path, []) ->
+    ok;
+do_stages(Path, [{Stage, Time}|Stages]) ->
+    create_or_update(list_to_tuple(Path ++ [Stage]), Time, histogram),
+    do_stages(Path, Stages).
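To make the extra dimension concrete, a hedged example of do_stages/2 at work (the stage names here are hypothetical):

%% do_stages([riak_kv, node, puts, time], [{precommit, 12}, {execute, 340}])
%% updates (creating on first use, via create_or_update/3 below) the histograms
%%   {riak_kv, node, puts, time, precommit}
%%   {riak_kv, node, puts, time, execute}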
+%% create dimensioned stats for read repairs.
+%% The indexes are from get core: [{Index, Reason::notfound|outofdate}].
+%% The preflist is a preflist of [{{Index, Node}, Type::primary|fallback}].
+do_repairs(Indices, Preflist) ->
+    lists:foreach(fun({{Idx, Node}, Type}) ->
+                          case proplists:get_value(Idx, Indices) of
+                              undefined ->
+                                  ok;
+                              Reason ->
+                                  create_or_update({?APP, node, gets, read_repairs, Node, Type, Reason}, 1, spiral)
+                          end
+                  end,
+                  Preflist).
+
+%% for dynamically created / dimensioned stats
+%% that can't be registered at start up
+create_or_update(Name, UpdateVal, Type) ->
+    case (catch folsom_metrics:notify_existing_metric(Name, UpdateVal, Type)) of
+        ok ->
+            ok;
+        {'EXIT', _} ->
+            register_stat(Name, Type),
+            create_or_update(Name, UpdateVal, Type)
+    end.
+
+%% Stats are namespaced by APP in folsom
+%% so that we don't need to co-ordinate on naming
+%% between apps.
+stat_name(Name) when is_list(Name) ->
+    list_to_tuple([?APP] ++ Name);
+stat_name(Name) when is_atom(Name) ->
+    {?APP, Name}.
+
+%% @doc list of {Name, Type} for static
+%% stats that we can register at start up
+stats() ->
+    [{[vnode, gets], spiral},
+     {[vnode, gets, time], histogram},
+     {[vnode, puts], spiral},
+     {[vnode, puts, time], histogram},
+     {[vnode, index, reads], spiral},
+     {[vnode, index, writes], spiral},
+     {[vnode, index, writes, postings], spiral},
+     {[vnode, index, deletes], spiral},
+     {[vnode, index, deletes, postings], spiral},
+     {[node, gets], spiral},
+     {[node, gets, siblings], histogram},
+     {[node, gets, objsize], histogram},
+     {[node, gets, time], histogram},
+     {[node, puts], spiral},
+     {[node, puts, time], histogram},
+     {[node, gets, read_repairs], spiral},
+     {[node, puts, coord_redirs], counter},
+     {mapper_count, counter},
+     {precommit_fail, counter},
+     {postcommit_fail, counter},
+     {[vnode, backend, leveldb, read_block_error],
+      {function, {function, ?MODULE, leveldb_read_block_errors}}}].
+
+%% @doc register a stat with folsom
+register_stat(Name, spiral) ->
+    folsom_metrics:new_spiral(Name);
+register_stat(Name, counter) ->
+    folsom_metrics:new_counter(Name);
+register_stat(Name, histogram) ->
+    %% get the global default histo type
+    {SampleType, SampleArgs} = get_sample_type(Name),
+    folsom_metrics:new_histogram(Name, SampleType, SampleArgs);
+register_stat(Name, {function, F}) ->
+    %% store the function in a gauge metric
+    folsom_metrics:new_gauge(Name),
+    folsom_metrics:notify({Name, F}).
+
+%% @doc the histogram sample type may be set in app.config,
+%% under the key `stat_sample_type' in the `riak_kv' section, or
+%% per stat, under the name of a `histogram' stat.
+%% Check the folsom homepage for available types.
+%% Defaults to `{slide_uniform, {60, 1028}}' (a sliding window
+%% of 60 seconds, with a uniform sample of at most 1028 entries).
+get_sample_type(Name) ->
+    SampleType0 = app_helper:get_env(riak_kv, stat_sample_type, {slide_uniform, {60, 1028}}),
+    app_helper:get_env(riak_kv, Name, SampleType0).
+
+%% @doc produce the legacy blob of stats for display.
+produce_stats() ->
+    riak_kv_stat_bc:produce_stats().
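For illustration only (not part of the patch): an app.config fragment that get_sample_type/1 above would honour. The global default lives under `stat_sample_type'; a per-stat override is keyed by the full stat name tuple, since get_sample_type/1 simply calls app_helper:get_env(riak_kv, Name, Default).

%% {riak_kv, [
%%     %% global default for all histogram stats
%%     {stat_sample_type, {slide_uniform, {60, 1028}}},
%%     %% hypothetical per-stat override
%%     {{riak_kv, node, gets, time}, {uniform, 5000}}
%% ]}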
+%% @doc get the leveldb.ReadBlockErrors counter.
+%% A non-zero value means it is time to consider replacing
+%% this node's disk.
+leveldb_read_block_errors() ->
+    %% level stats are per node,
+    %% but the way to get them
+    %% is with riak_kv_vnode:vnode_status/1;
+    %% for that reason just choose a partition
+    %% on this node at random
+    %% and ask for its stats
     {ok, R} = riak_core_ring_manager:get_my_ring(),
-    [{ring_members, riak_core_ring:all_members(R)},
-     {ring_num_partitions, riak_core_ring:num_partitions(R)},
-     {ring_ownership, list_to_binary(lists:flatten(io_lib:format("~p", [dict:to_list(
-                        lists:foldl(fun({_P, N}, Acc) ->
-                                            case dict:find(N, Acc) of
-                                                {ok, V} ->
-                                                    dict:store(N, V+1, Acc);
-                                                error ->
-                                                    dict:store(N, 1, Acc)
-                                            end
-                                    end, dict:new(), riak_core_ring:all_owners(R)))])))}].
-
-
-config_stats() ->
-    [{ring_creation_size, app_helper:get_env(riak_core, ring_creation_size)},
-     {storage_backend, app_helper:get_env(riak_kv, storage_backend)}].
-
-mapper_stats(#state{mapper_count=Count}) ->
-    [{executing_mappers, Count}].
-
-%% @spec pbc_stats(integer(), state()) -> proplist()
-%% @doc Get stats on the disk, as given by the disksup module
-%%      of the os_mon application.
-pbc_stats(Moment, State=#state{pbc_connects_total=NCT, pbc_active=Active, legacy=true}) ->
-    case whereis(riak_kv_pb_socket_sup) of
-        undefined ->
-            [];
-        _ -> [{pbc_connects_total, NCT},
-              {pbc_connects, spiral_minute(Moment, #state.pbc_connects, State)},
-              {pbc_active, Active}]
-    end;
-pbc_stats(_, State=#state{pbc_connects_total=NCT,
-                          pbc_active=Active, legacy=false}) ->
-    case whereis(riak_kv_pb_socket_sup) of
-        undefined ->
-            [];
-        _ ->
-            NC = metric_stats(State#state.pbc_connects),
-            [{pbc_connects_total, NCT},
-             {pbc_connects, meter_minute(NC)},
-             {pbc_active, Active}]
-    end.
-
-
-remove_slide_private_dirs() ->
-    os:cmd("rm -rf " ++ slide:private_dir()).
+    Indices = riak_core_ring:my_indices(R),
+    %% rand_uniform/2 is exclusive of its upper bound, so add 1
+    %% to make every index (including the last) a candidate
+    Nth = crypto:rand_uniform(1, length(Indices) + 1),
+    Idx = lists:nth(Nth, Indices),
+    PList = [{Idx, node()}],
+    [{Idx, [Status]}] = riak_kv_vnode:vnode_status(PList),
+    leveldb_read_block_errors(Status).
+
+leveldb_read_block_errors({backend_status, riak_kv_eleveldb_backend, Status}) ->
+    rbe_val(proplists:get_value(read_block_error, Status));
+leveldb_read_block_errors({backend_status, riak_kv_multi_backend, Statuses}) ->
+    multibackend_read_block_errors(Statuses, undefined);
+leveldb_read_block_errors(_) ->
+    undefined.
+
+multibackend_read_block_errors([], Val) ->
+    rbe_val(Val);
+multibackend_read_block_errors([{_Name, Status}|Rest], undefined) ->
+    RBEVal = case proplists:get_value(mod, Status) of
+                 riak_kv_eleveldb_backend ->
+                     proplists:get_value(read_block_error, Status);
+                 _ -> undefined
+             end,
+    multibackend_read_block_errors(Rest, RBEVal);
+multibackend_read_block_errors(_, Val) ->
+    rbe_val(Val).
+
+rbe_val(undefined) ->
+    undefined;
+rbe_val(Bin) ->
+    list_to_integer(binary_to_list(Bin)).
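Editor's sketch of the status term the clauses above consume (the values are hypothetical); note the counter arrives as a binary and rbe_val/1 converts it:

%% {backend_status, riak_kv_eleveldb_backend,
%%  [{read_block_error, <<"17">>} | _OtherLevelStats]}
%% => leveldb_read_block_errors/1 returns 17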
diff --git a/src/riak_kv_stat_bc.erl b/src/riak_kv_stat_bc.erl
new file mode 100644
index 0000000000..a458473d1a
--- /dev/null
+++ b/src/riak_kv_stat_bc.erl
@@ -0,0 +1,402 @@
+%% -------------------------------------------------------------------
+%%
+%% riak_kv_stat_bc: backwards-compatible stats module. Maps new folsom stats
+%% to legacy riak_kv stats.
+%%
+%% Copyright (c) 2007-2010 Basho Technologies, Inc. All Rights Reserved.
+%%
+%% This file is provided to you under the Apache License,
+%% Version 2.0 (the "License"); you may not use this file
+%% except in compliance with the License. You may obtain
+%% a copy of the License at
+%%
+%%   http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing,
+%% software distributed under the License is distributed on an
+%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+%% KIND, either express or implied. See the License for the
+%% specific language governing permissions and limitations
+%% under the License.
+%%
+%% -------------------------------------------------------------------
+
+%% @doc riak_kv_stat_bc is a module that maps the new riak_kv_stat metrics
+%% to the old set of stats. It exists to maintain backwards compatibility for
+%% those using the `/stats` endpoint and `riak-admin status`. This module
+%% should be considered temporary and will be deprecated soon.
+%%
+%% Legacy stats:
+%%<dl><dt> vnode_gets
+%%</dt><dd> Total number of gets handled by all vnodes on this node
+%%          in the last minute.
+%%</dd><dt> vnode_puts
+%%</dt><dd> Total number of puts handled by all vnodes on this node
+%%          in the last minute.
+%%</dd><dt> vnode_index_reads
+%%</dt><dd> The number of index reads handled by all vnodes on this node.
+%%          Each query counts as an index read.
+%%</dd><dt> vnode_index_writes
+%%</dt><dd> The number of batched writes handled by all vnodes on this node.
+%%</dd><dt> vnode_index_writes_postings
+%%</dt><dd> The number of postings written to all vnodes on this node.
+%%</dd><dt> vnode_index_deletes
+%%</dt><dd> The number of batched deletes handled by all vnodes on this node.
+%%          update({vnode_index_delete, PostingsRemoved})
+%%</dd><dt> vnode_index_deletes_postings
+%%</dt><dd> The number of postings deleted from all vnodes on this node.
+%%</dd><dt> node_gets
+%%</dt><dd> Number of gets coordinated by this node in the last
+%%          minute.
+%%</dd><dt> node_get_fsm_siblings
+%%</dt><dd> Stats about the number of siblings per object in the last minute.
+%%</dd><dt> node_get_fsm_objsize
+%%</dt><dd> Stats about object size over the last minute. The object
+%%          size is an estimate calculated by summing the size of the
+%%          bucket name, key name, and serialized vector clock, plus
+%%          the value and serialized metadata of each sibling.
+%%</dd><dt> node_get_fsm_time_mean
+%%</dt><dd> Mean time, in microseconds, between when a riak_kv_get_fsm is
+%%          started and when it sends a reply to the client, for the
+%%          last minute.
+%%</dd><dt> node_get_fsm_time_median
+%%</dt><dd> Median time, in microseconds, between when a riak_kv_get_fsm
+%%          is started and when it sends a reply to the client, for
+%%          the last minute.
+%%</dd><dt> node_get_fsm_time_95
+%%</dt><dd> Response time, in microseconds, met or beaten by 95% of
+%%          riak_kv_get_fsm executions.
+%%</dd><dt> node_get_fsm_time_99
+%%</dt><dd> Response time, in microseconds, met or beaten by 99% of
+%%          riak_kv_get_fsm executions.
+%%</dd><dt> node_get_fsm_time_100
+%%</dt><dd> Response time, in microseconds, met or beaten by 100% of
+%%          riak_kv_get_fsm executions.
+%%</dd><dt> node_puts
+%%</dt><dd> Number of puts coordinated by this node in the last
+%%          minute.
+%%</dd><dt> node_put_fsm_time_mean
+%%</dt><dd> Mean time, in microseconds, between when a riak_kv_put_fsm is
+%%          started and when it sends a reply to the client, for the
+%%          last minute.
+%%</dd><dt> node_put_fsm_time_median
+%%</dt><dd> Median time, in microseconds, between when a riak_kv_put_fsm
+%%          is started and when it sends a reply to the client, for
+%%          the last minute.
+%%</dd><dt> node_put_fsm_time_95
+%%</dt><dd> Response time, in microseconds, met or beaten by 95% of
+%%          riak_kv_put_fsm executions.
+%%</dd><dt> node_put_fsm_time_99
+%%</dt><dd> Response time, in microseconds, met or beaten by 99% of
+%%          riak_kv_put_fsm executions.
+%%</dd><dt> node_put_fsm_time_100
+%%</dt><dd> Response time, in microseconds, met or beaten by 100% of
+%%          riak_kv_put_fsm executions.
+%%</dd><dt> cpu_nprocs
+%%</dt><dd> Value returned by {@link cpu_sup:nprocs/0}.
+%%</dd><dt> cpu_avg1
+%%</dt><dd> Value returned by {@link cpu_sup:avg1/0}.
+%%</dd><dt> cpu_avg5
+%%</dt><dd> Value returned by {@link cpu_sup:avg5/0}.
+%%</dd><dt> cpu_avg15
+%%</dt><dd> Value returned by {@link cpu_sup:avg15/0}.
+%%</dd><dt> mem_total
+%%</dt><dd> The first element of the tuple returned by
+%%          {@link memsup:get_memory_data/0}.
+%%</dd><dt> mem_allocated
+%%</dt><dd> The second element of the tuple returned by
+%%          {@link memsup:get_memory_data/0}.
+%%</dd><dt> disk
+%%</dt><dd> Value returned by {@link disksup:get_disk_data/0}.
+%%</dd><dt> pbc_connects_total
+%%</dt><dd> Total number of pb socket connections since start.
+%%</dd><dt> pbc_active
+%%</dt><dd> Number of active pb socket connections.
+%%</dd><dt> coord_redirs_total
+%%</dt><dd> Number of puts forwarded to be coordinated on a node
+%%          in the preflist.
+%%</dd></dl>
+%%
+%%
+-module(riak_kv_stat_bc).
+
+-compile(export_all).
+
+%% @spec produce_stats() -> proplist()
+%% @doc Produce a proplist-formatted view of the current aggregation
+%%      of stats.
+produce_stats() ->
+    lists:append(
+      [lists:flatten(backwards_compat(riak_core_stat_q:get_stats([riak_kv]))),
+       backwards_compat_pb(riak_core_stat_q:get_stats([riak_api])),
+       read_repair_stats(),
+       level_stats(),
+       pipe_stats(),
+       cpu_stats(),
+       mem_stats(),
+       disk_stats(),
+       system_stats(),
+       ring_stats(),
+       config_stats(),
+       app_stats(),
+       memory_stats()
+      ]).
+
+%% Stats in folsom are stored with tuples as keys; the
+%% tuples mimic a hierarchical structure. To be free of legacy
+%% naming constraints, the new names are not simply the old names
+%% with underscores swapped for commas. Uses legacy_stat_map to generate
+%% legacy stats from the new list of stats.
+backwards_compat(Stats) ->
+    [bc_stat(Old, New, Type, Stats) || {Old, New, Type} <- legacy_stat_map()].
+
+bc_stat(Old, {New, Field}, histogram_percentile, Stats) ->
+    Stat = proplists:get_value(New, Stats),
+    Percentile = proplists:get_value(percentile, Stat),
+    Val = proplists:get_value(Field, Percentile),
+    {Old, trunc(Val)};
+bc_stat(Old, {New, Field}, histogram, Stats) ->
+    Stat = proplists:get_value(New, Stats),
+    Val = proplists:get_value(Field, Stat),
+    {Old, trunc(Val)};
+bc_stat(Old, {New, Field}, spiral, Stats) ->
+    Stat = proplists:get_value(New, Stats),
+    Val = proplists:get_value(Field, Stat),
+    {Old, Val};
+bc_stat(Old, New, counter, Stats) ->
+    Stat = proplists:get_value(New, Stats),
+    {Old, Stat}.
+
+
+%% hard-coded mapping of stats to the legacy format.
+%% There was enough variation in the old names that a simple
+%% concatenation of the elements in the new stat key would not suffice;
+%% applications depend on these exact legacy names.
+legacy_stat_map() -> + [{vnode_gets, {{riak_kv, vnode, gets}, one}, spiral}, + {vnode_gets_total, {{riak_kv, vnode, gets}, count}, spiral}, + {vnode_puts, {{riak_kv, vnode, puts}, one}, spiral}, + {vnode_puts_total, {{riak_kv, vnode, puts}, count}, spiral}, + {vnode_index_reads, {{riak_kv, vnode, index, reads}, one}, spiral}, + {vnode_index_reads_total, {{riak_kv, vnode, index, reads}, count}, spiral}, + {vnode_index_writes, {{riak_kv, vnode, index, writes}, one}, spiral}, + {vnode_index_writes_total, {{riak_kv, vnode, index, writes}, count}, spiral}, + {vnode_index_writes_postings, {{riak_kv,vnode,index,writes,postings}, one}, spiral}, + {vnode_index_writes_postings_total, {{riak_kv,vnode,index,writes,postings}, count}, spiral}, + {vnode_index_deletes, {{riak_kv,vnode,index,deletes}, one}, spiral}, + {vnode_index_deletes_total, {{riak_kv,vnode,index,deletes}, count}, spiral}, + {vnode_index_deletes_postings, {{riak_kv,vnode,index,deletes,postings}, one}, spiral}, + {vnode_index_deletes_postings_total, {{riak_kv,vnode,index,deletes,postings}, count}, spiral}, + {node_gets, {{riak_kv,node,gets}, one}, spiral}, + {node_gets_total, {{riak_kv,node,gets}, count}, spiral}, + {node_get_fsm_siblings_mean, {{riak_kv,node,gets,siblings}, arithmetic_mean}, histogram}, + {node_get_fsm_siblings_median, {{riak_kv,node,gets,siblings}, median}, histogram}, + {node_get_fsm_siblings_95, {{riak_kv,node,gets,siblings}, 95}, histogram_percentile}, + {node_get_fsm_siblings_99, {{riak_kv,node,gets,siblings}, 99}, histogram_percentile}, + {node_get_fsm_siblings_100, {{riak_kv,node,gets,siblings}, max}, histogram}, + {node_get_fsm_objsize_mean, {{riak_kv,node,gets,objsize}, arithmetic_mean}, histogram}, + {node_get_fsm_objsize_median, {{riak_kv,node,gets,objsize}, median}, histogram}, + {node_get_fsm_objsize_95, {{riak_kv,node,gets,objsize}, 95}, histogram_percentile}, + {node_get_fsm_objsize_99, {{riak_kv,node,gets,objsize}, 99}, histogram_percentile}, + {node_get_fsm_objsize_100, {{riak_kv,node,gets,objsize}, max}, histogram}, + {node_get_fsm_time_mean, {{riak_kv,node,gets,time}, arithmetic_mean}, histogram}, + {node_get_fsm_time_median, {{riak_kv,node,gets,time}, median}, histogram}, + {node_get_fsm_time_95, {{riak_kv,node,gets,time}, 95}, histogram_percentile}, + {node_get_fsm_time_99, {{riak_kv,node,gets,time}, 99}, histogram_percentile}, + {node_get_fsm_time_100, {{riak_kv,node,gets,time}, max}, histogram}, + {node_puts, {{riak_kv,node, puts}, one}, spiral}, + {node_puts_total, {{riak_kv,node, puts}, count}, spiral}, + {node_put_fsm_time_mean, {{riak_kv,node, puts, time}, arithmetic_mean}, histogram}, + {node_put_fsm_time_median, {{riak_kv,node, puts, time}, median}, histogram}, + {node_put_fsm_time_95, {{riak_kv,node, puts, time}, 95}, histogram_percentile}, + {node_put_fsm_time_99, {{riak_kv,node, puts, time}, 99}, histogram_percentile}, + {node_put_fsm_time_100, {{riak_kv,node, puts, time}, max}, histogram}, + {read_repairs, {{riak_kv,node,gets,read_repairs}, one}, spiral}, + {read_repairs_total, {{riak_kv,node,gets,read_repairs}, count}, spiral}, + {coord_redirs_total, {riak_kv,node,puts,coord_redirs}, counter}, + {executing_mappers, {riak_kv,mapper_count}, counter}, + {precommit_fail, {riak_kv, precommit_fail}, counter}, + {postcommit_fail, {riak_kv, postcommit_fail}, counter} + ]. + +%% PB stats are now under riak_api. In the past they were part of riak_kv. +%% This function maps those new values to the old names. 
+backwards_compat_pb(Stats) -> + [bc_stat(Old, New, Type, Stats) || {Old, New, Type} <- + [{pbc_active, {riak_api, pbc_connects, active}, counter}, + {pbc_connects, {{riak_api, pbc_connects}, one}, spiral}, + {pbc_connects_total, {{riak_api, pbc_connects}, count}, spiral}]]. + +%% @spec cpu_stats() -> proplist() +%% @doc Get stats on the cpu, as given by the cpu_sup module +%% of the os_mon application. +cpu_stats() -> + [{cpu_nprocs, cpu_sup:nprocs()}, + {cpu_avg1, cpu_sup:avg1()}, + {cpu_avg5, cpu_sup:avg5()}, + {cpu_avg15, cpu_sup:avg15()}]. + +%% @spec mem_stats() -> proplist() +%% @doc Get stats on the memory, as given by the memsup module +%% of the os_mon application. +mem_stats() -> + {Total, Alloc, _} = memsup:get_memory_data(), + [{mem_total, Total}, + {mem_allocated, Alloc}]. + +%% @spec disk_stats() -> proplist() +%% @doc Get stats on the disk, as given by the disksup module +%% of the os_mon application. +disk_stats() -> + [{disk, disksup:get_disk_data()}]. + +system_stats() -> + [{nodename, node()}, + {connected_nodes, nodes()}, + {sys_driver_version, list_to_binary(erlang:system_info(driver_version))}, + {sys_global_heaps_size, erlang:system_info(global_heaps_size)}, + {sys_heap_type, erlang:system_info(heap_type)}, + {sys_logical_processors, erlang:system_info(logical_processors)}, + {sys_otp_release, list_to_binary(erlang:system_info(otp_release))}, + {sys_process_count, erlang:system_info(process_count)}, + {sys_smp_support, erlang:system_info(smp_support)}, + {sys_system_version, list_to_binary(string:strip(erlang:system_info(system_version), right, $\n))}, + {sys_system_architecture, list_to_binary(erlang:system_info(system_architecture))}, + {sys_threads_enabled, erlang:system_info(threads)}, + {sys_thread_pool_size, erlang:system_info(thread_pool_size)}, + {sys_wordsize, erlang:system_info(wordsize)}]. + +app_stats() -> + [{list_to_atom(atom_to_list(A) ++ "_version"), list_to_binary(V)} + || {A,_,V} <- application:which_applications()]. + +memory_stats() -> + [{list_to_atom("memory_" ++ atom_to_list(K)), V} || {K,V} <- erlang:memory()]. + +ring_stats() -> + {ok, R} = riak_core_ring_manager:get_my_ring(), + [{ring_members, riak_core_ring:all_members(R)}, + {ring_num_partitions, riak_core_ring:num_partitions(R)}, + {ring_ownership, list_to_binary(lists:flatten(io_lib:format("~p", [dict:to_list( + lists:foldl(fun({_P, N}, Acc) -> + case dict:find(N, Acc) of + {ok, V} -> + dict:store(N, V+1, Acc); + error -> + dict:store(N, 1, Acc) + end + end, dict:new(), riak_core_ring:all_owners(R)))])))}]. + + +config_stats() -> + [{ring_creation_size, app_helper:get_env(riak_core, ring_creation_size)}, + {storage_backend, app_helper:get_env(riak_kv, storage_backend)}]. + +%% @doc add the pipe stats to the blob in a style consistent +%% with those stats already in the blob +pipe_stats() -> + Stats = riak_core_stat_q:get_stats([riak_pipe]), + lists:flatten([bc_stat(Name, Val) || {Name, Val} <- Stats]). + +%% old style blob stats don't have the app name +%% and they have underscores, not commas +bc_stat(Name, Val) -> + StatName = join(tl(tuple_to_list(Name))), + bc_stat_val(StatName, Val). 
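A hedged example of the name flattening performed by bc_stat/2 above together with bc_stat_val/2 (defined just below); the riak_pipe stat name is hypothetical:

%% bc_stat({riak_pipe, vnode, queued}, [{count, 10}, {one, 2}])
%% => [{vnode_queued_count, 10}, {vnode_queued_one, 2}]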
+
+%% Old-style stats don't have tuple lists as values;
+%% they have an entry per element in the complex stats tuple list,
+%% so a spiral with both a count and a one-minute reading
+%% becomes two stats, NAME_count and NAME_one.
+bc_stat_val(StatName, Val) when is_list(Val) ->
+    [{join([StatName, ValName]), ValVal} || {ValName, ValVal} <- Val];
+bc_stat_val(StatName, Val) ->
+    {StatName, Val}.
+
+%% Leveldb stats are a last-minute addition to the blob
+level_stats() ->
+    Stats = riak_core_stat_q:get_stats([riak_kv, vnode, backend, leveldb, read_block_error]),
+    [{join(lists:nthtail(3, tuple_to_list(Name))), Val} || {Name, Val} <- Stats].
+
+%% Read repair stats are a new addition to the legacy blob.
+%% Added to the blob since the stat query interface was not ready for the 1.3
+%% release.
+%% The read repair stats are stored as dimensions with
+%% the key {riak_kv, node, gets, read_repairs, Node, Type, Reason}.
+%% The CSEs are only interested in aggregations of Type and Reason,
+%% which are elements 6 and 7 in the key.
+read_repair_stats() ->
+    aggregate(read_repairs, [riak_kv, node, gets, read_repairs, '_', '_', '_'], [6,7]).
+
+%% TODO generalise for riak_core_stat_q
+%% aggregates spiral values for stats retrieved by `Query'
+%% aggregates by the key field(s) indexed at `Fields'
+%% produces a flat list of `BaseName_NameOfFieldAtIndex[_count]'
+%% to fit in with the existing naming convention in the legacy stat blob
+aggregate(BaseName, Query, Fields) ->
+    Stats = riak_core_stat_q:get_stats(Query),
+    Aggregates = do_aggregate(Stats, Fields),
+    FlatStats = flatten_aggregate_stats(BaseName, Aggregates),
+    lists:flatten(FlatStats).
+
+do_aggregate(Stats, Fields) ->
+    lists:foldl(fun({Name, [{count, C0}, {one, O0}]}, Acc) ->
+                        Key = key_from_fields(Name, Fields),
+                        [{count, C}, {one, O}] = case orddict:find(Key, Acc) of
+                                                     error -> [{count, 0}, {one, 0}];
+                                                     {ok, V} -> V
+                                                 end,
+                        orddict:store(Key, [{count, C+C0}, {one, O+O0}], Acc)
+                end,
+                orddict:new(),
+                Stats).
+
+%% Generate a dictionary key for the running
+%% aggregation using key `Name' elements at index(es)
+%% in `Fields'
+key_from_fields(Name, Fields) ->
+    Key = [element(N, Name) || N <- Fields],
+    join(Key).
+
+%% Folds over the aggregate nested dictionaries to create
+%% a flat list of stats whose names are made by
+%% joining key names to `BaseName'
+flatten_aggregate_stats(BaseName, Aggregates) ->
+    orddict:fold(fun(K, V, Acc) when not is_list(V) ->
+                         [{join([BaseName, K]), V}|Acc];
+                    (K, V, Acc) ->
+                         [flatten_aggregate_stats(join([BaseName, K]), V)|Acc]
+                 end,
+                 [],
+                 Aggregates).
+
+%% Join a list of atoms into a single atom
+%% with elements separated by '_'
+join(L) ->
+    join(L, <<>>).
+
+join([], Bin) ->
+    binary_to_atom(Bin, latin1);
+join([Atom|Rest], <<>>) ->
+    Bin2 = atom_to_binary(Atom, latin1),
+    join(Rest, <<Bin2/binary>>);
+join([Atom|Rest], Bin) ->
+    Bin2 = atom_to_binary(Atom, latin1),
+    join(Rest, <<Bin/binary, $_, Bin2/binary>>).
diff --git a/src/riak_kv_status.erl b/src/riak_kv_status.erl
index 9570010768..7bd1ef3c99 100644
--- a/src/riak_kv_status.erl
+++ b/src/riak_kv_status.erl
@@ -34,12 +34,7 @@
 
 -spec(statistics() -> [any()]).
 statistics() ->
-    case whereis(riak_kv_stat) of
-        undefined ->
-            [];
-        _ ->
-            riak_kv_stat:get_stats()
-    end.
+    riak_kv_stat:get_stats().
 
 ringready() ->
     riak_core_status:ringready().
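To illustrate the read-repair aggregation in riak_kv_stat_bc above (an editor's sketch; the node name and counts are hypothetical): spirals keyed by node, type, and reason are grouped on key elements 6 and 7, then flattened into legacy-style names:

%% given a spiral such as
%%   {riak_kv, node, gets, read_repairs, 'riak@n1', primary, notfound}
%% aggregate(read_repairs, [riak_kv, node, gets, read_repairs, '_', '_', '_'], [6,7])
%% => [{read_repairs_primary_notfound_count, 7},
%%     {read_repairs_primary_notfound_one, 2}]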
diff --git a/src/riak_kv_sup.erl b/src/riak_kv_sup.erl index 9e1b757800..4f7f3dd1ac 100644 --- a/src/riak_kv_sup.erl +++ b/src/riak_kv_sup.erl @@ -41,18 +41,12 @@ start_link() -> %% @spec init([]) -> SupervisorTree %% @doc supervisor callback. init([]) -> + catch dtrace:init(), % NIF load trigger (R14B04) + catch dyntrace:p(), % NIF load trigger (R15B01+) VMaster = {riak_kv_vnode_master, {riak_core_vnode_master, start_link, - [riak_kv_vnode, riak_kv_legacy_vnode]}, + [riak_kv_vnode, riak_kv_legacy_vnode, riak_kv]}, permanent, 5000, worker, [riak_core_vnode_master]}, - RiakPb = [ {riak_kv_pb_socket_sup, {riak_kv_pb_socket_sup, start_link, []}, - permanent, infinity, supervisor, [riak_kv_pb_socket_sup]}, - {riak_kv_pb_listener, {riak_kv_pb_listener, start_link, []}, - permanent, 5000, worker, [riak_kv_pb_listener]} - ], - RiakStat = {riak_kv_stat, - {riak_kv_stat, start_link, []}, - permanent, 5000, worker, [riak_kv_stat]}, MapJSPool = {?JSPOOL_MAP, {riak_kv_js_manager, start_link, [?JSPOOL_MAP, read_js_pool_size(map_js_vm_count, "map")]}, @@ -68,25 +62,6 @@ init([]) -> JSSup = {riak_kv_js_sup, {riak_kv_js_sup, start_link, []}, permanent, infinity, supervisor, [riak_kv_js_sup]}, - %% @TODO This code is only here to support - %% rolling upgrades and will be removed. - KLMaster = {riak_kv_keylister_master, - {riak_kv_keylister_master, start_link, []}, - permanent, 30000, worker, [riak_kv_keylister_master]}, - %% @TODO This code is only here to support - %% rolling upgrades and will be removed. - KLSup = {riak_kv_keylister_legacy_sup, - {riak_kv_keylister_legacy_sup, start_link, []}, - permanent, infinity, supervisor, [riak_kv_keylister_sup]}, - MapCache = {riak_kv_mapred_cache, - {riak_kv_mapred_cache, start_link, []}, - permanent, 30000, worker, [riak_kv_mapred_cache]}, - MapMaster = {riak_kv_map_master, - {riak_kv_map_master, start_link, []}, - permanent, 30000, worker, [riak_kv_map_master]}, - MapperSup = {riak_kv_mapper_sup, - {riak_kv_mapper_sup, start_link, []}, - permanent, infinity, supervisor, [riak_kv_mapper_sup]}, GetFsmSup = {riak_kv_get_fsm_sup, {riak_kv_get_fsm_sup, start_link, []}, permanent, infinity, supervisor, [riak_kv_get_fsm_sup]}, @@ -105,39 +80,27 @@ init([]) -> IndexFsmSup = {riak_kv_index_fsm_sup, {riak_kv_index_fsm_sup, start_link, []}, permanent, infinity, supervisor, [riak_kv_index_fsm_sup]}, - %% @TODO This code is only here to support - %% rolling upgrades and will be removed. - LegacyKeysFsmSup = {riak_kv_keys_fsm_legacy_sup, - {riak_kv_keys_fsm_legacy_sup, start_link, []}, - permanent, infinity, supervisor, [riak_kv_keys_fsm_legacy_sup]}, + SinkFsmSup = {riak_kv_mrc_sink_sup, + {riak_kv_mrc_sink_sup, start_link, []}, + permanent, infinity, supervisor, [riak_kv_mrc_sink_sup]}, % Figure out which processes we should run... - IsPbConfigured = (app_helper:get_env(riak_kv, pb_ip) /= undefined) - andalso (app_helper:get_env(riak_kv, pb_port) /= undefined), HasStorageBackend = (app_helper:get_env(riak_kv, storage_backend) /= undefined), - IsStatEnabled = (app_helper:get_env(riak_kv, riak_kv_stat) == true), % Build the process list... Processes = lists:flatten([ ?IF(HasStorageBackend, VMaster, []), - ?IF(IsPbConfigured, RiakPb, []), - ?IF(IsStatEnabled, RiakStat, []), GetFsmSup, PutFsmSup, DeleteSup, + SinkFsmSup, BucketsFsmSup, KeysFsmSup, IndexFsmSup, - LegacyKeysFsmSup, - KLSup, - KLMaster, JSSup, MapJSPool, ReduceJSPool, - HookJSPool, - MapperSup, - MapMaster, - MapCache + HookJSPool ]), % Run the proesses... 
diff --git a/src/riak_kv_test_util.erl b/src/riak_kv_test_util.erl index e94cc30fbb..6a5d8e4a14 100644 --- a/src/riak_kv_test_util.erl +++ b/src/riak_kv_test_util.erl @@ -29,21 +29,57 @@ -export([call_unused_fsm_funs/1, stop_process/1, wait_for_pid/1, - wait_for_children/1]). + wait_for_children/1, + common_setup/1, + common_setup/2, + common_cleanup/1, + common_cleanup/2]). + -include_lib("eunit/include/eunit.hrl"). +-define(SETUPTHUNK, fun(_) -> ok end). + +%% @doc Creates a setup function for tests that need Riak KV stood +%% up in an isolated fashion. +%% @see setup/3 +-spec common_setup(TestName::atom() | string()) -> fun(). +common_setup(T) when is_atom(T) -> + common_setup(atom_to_list(T)); +common_setup(TestName) -> + common_setup(TestName, ?SETUPTHUNK). + +-spec common_setup(atom() | string(), SetupFun::fun((load|start|stop) -> any())) -> fun(). +common_setup(T, S) when is_atom(T) -> + common_setup(atom_to_list(T), S); +common_setup(TestName, Setup) -> + fun() -> setup(TestName, Setup) end. + +%% @doc Creates a cleanup function for tests that need Riak KV stood up in +%% an isolated fashion. +%% @see cleanup/3 +-spec common_cleanup(TestName::atom() | string()) -> fun(). +common_cleanup(T) when is_atom(T) -> + common_cleanup(atom_to_list(T)); +common_cleanup(TestName) -> + common_cleanup(TestName, ?SETUPTHUNK). +-spec common_cleanup(TestName::atom() | string(), CleanupFun::fun((stop) -> any())) -> fun(). +common_cleanup(T, C) when is_atom(T) -> + common_cleanup(atom_to_list(T), C); +common_cleanup(TestName, Cleanup) -> + fun(X) -> cleanup(TestName, Cleanup, X) end. + +%% @doc Calls gen_fsm functions that might not have been touched by a +%% test +-spec call_unused_fsm_funs(module()) -> any(). call_unused_fsm_funs(Mod) -> Mod:handle_event(event, statename, state), Mod:handle_sync_event(event, from, stateneame, state), Mod:handle_info(info, statename, statedata), Mod:terminate(reason, statename, state), Mod:code_change(oldvsn, statename, state, extra). - - - -%% Stop a running pid - unlink and exit(kill) the process -%% + +%% @doc Stop a running pid - unlink and exit(kill) the process stop_process(undefined) -> ok; stop_process(RegName) when is_atom(RegName) -> @@ -53,7 +89,7 @@ stop_process(Pid) when is_pid(Pid) -> exit(Pid, shutdown), ok = wait_for_pid(Pid). -%% Wait for a pid to exit +%% @doc Wait for a pid to exit wait_for_pid(Pid) -> Mref = erlang:monitor(process, Pid), receive @@ -96,4 +132,144 @@ wait_for_children(PPid) -> ok end. +%% @doc Performs generic, riak_kv-specific and test-specific setup +%% when used within a test fixture. This includes cleaning up any +%% leaky state from previous tests (internally calling `cleanup/3'), +%% loading dependent applications, starting distributed Erlang, +%% starting dependent applications, and waiting for riak_kv to become +%% available. +%% +%% The given `SetupFun' will be called first with the argument `stop' +%% before other applications are stopped (to cleanup leaky test +%% state), `load' after all other applications are loaded, and then +%% `start' after all other applications are started. It is generally +%% good practice to use the same function in the `SetupFun' as the +%% `CleanupFun' given to `cleanup/3'. +%% +%% @see common_setup/2, dep_apps/2, do_dep_apps/2 +-spec setup(TestName::string(), fun((load|start|stop) -> any())) -> ok. 
+setup(TestName, SetupFun) -> + %% Cleanup in case a previous test did not + cleanup(TestName, SetupFun, setup), + + %% Load application environments + Deps = dep_apps(TestName, SetupFun), + do_dep_apps(load, Deps), + + %% Start erlang node + {ok, Hostname} = inet:gethostname(), + TestNode = list_to_atom(TestName ++ "@" ++ Hostname), + net_kernel:start([TestNode, longnames]), + + %% Start dependent applications + do_dep_apps(start, Deps), + + %% Wait for KV to be ready + riak_core:wait_for_application(riak_kv), + riak_core:wait_for_service(riak_kv), + ok. + +%% @doc Performs generic, riak_kv-specific and test-specific cleanup +%% when used within a test fixture. This includes stopping dependent +%% applications, stopping distributed Erlang, and killing pernicious +%% processes. The given `CleanupFun' will be called with the argument +%% `stop' before other components are stopped. +%% +%% @see common_cleanup/2, dep_apps/2, do_dep_apps/2 +-spec cleanup(Test::string(), CleanupFun::fun((stop) -> any()), SetupResult::setup | atom()) -> ok. +cleanup(Test, CleanupFun, setup) -> + %% Remove existing ring files so we have a fresh ring + os:cmd("rm -rf " ++ Test ++ "/ring"), + cleanup(Test, CleanupFun, ok); +cleanup(Test, CleanupFun, _) -> + Deps = lists:reverse(dep_apps(Test, CleanupFun)), + + %% Stop the applications in reverse order + do_dep_apps(stop, Deps), + + %% Cleanup potentially runaway processes + catch exit(whereis(riak_kv_vnode_master), kill), + catch exit(whereis(riak_sysmon_filter), kill), + catch riak_core_stat_cache:stop(), + + %% Stop distributed Erlang + net_kernel:stop(), + + %% Reset the riak_core vnode_modules + application:set_env(riak_core, vnode_modules, []), + ok. + +%% @doc Calculates a list of dependent applications and functions that +%% can be passed to do_deps_apps/2 to perform the lifecycle phase on +%% them all at once. This ensures that applications start and stop in +%% the correct order and the test also has a chance to inject its own +%% setup and teardown code. Included in the sequence are two default +%% setup functions, one that silences SASL logging and redirects it to +%% a file, and one that configures some settings for riak_core and +%% lager. +%% +%% By passing the `Test' argument, the test's data and logging state +%% is also isolated to its own directory so as not to clobber other +%% tests. +%% +%% The `Extra' function takes an atom which represents the phase of +%% application lifecycle, one of `load', `start' or `stop'. +%% +%% @see common_setup/2, common_cleanup/2 +-spec dep_apps(Test::string(), Extra::fun((load | start | stop) -> any())) -> [ atom() | fun() ]. +dep_apps(Test, Extra) -> + Silencer = fun(load) -> + %% Silence logging junk + application:set_env(kernel, error_logger, silent), + filelib:ensure_dir(Test ++ "/log/sasl.log"), + application:set_env(sasl, sasl_error_logger, {file, Test++"/log/sasl.log"}), + error_logger:tty(false); + (_) -> ok + end, + + DefaultSetupFun = + fun(load) -> + %% Set some missing env vars that are normally part of + %% release packaging. These can be overridden by the + %% Extra fun. 
+ application:set_env(riak_core, ring_creation_size, 64), + application:set_env(riak_core, ring_state_dir, Test ++ "/ring"), + application:set_env(riak_core, platform_data_dir, Test ++ "/data"), + application:set_env(riak_core, handoff_port, 0), %% pick a random handoff port + application:set_env(lager, handlers, [{lager_file_backend, + [ + {Test ++ "/log/debug.log", debug, 10485760, "$D0", 5}]}]), + application:set_env(lager, crash_log, Test ++ "/log/crash.log"); + (stop) -> ok; + (_) -> ok + end, + + [sasl, Silencer, crypto, public_key, ssl, riak_sysmon, os_mon, + runtime_tools, erlang_js, inets, mochiweb, webmachine, + basho_stats, bitcask, compiler, syntax_tools, lager, folsom, + riak_core, riak_pipe, riak_api, riak_kv, DefaultSetupFun, Extra]. + + +%% @doc Runs the application-lifecycle phase across all of the given +%% applications and functions. +%% @see dep_apps/2 +-spec do_dep_apps(load | start | stop, [ atom() | fun() ]) -> [ any() ]. +do_dep_apps(StartStop, Apps) -> + lists:map(fun(A) when is_atom(A) -> + case include_app_phase(StartStop, A) of + true -> application:StartStop(A); + _ -> ok + end; + (F) -> F(StartStop) + end, Apps). + +%% @doc Determines whether a given application should be modified in +%% the given phase. If this returns false, the application will not be +%% loaded, started, or stopped by `do_dep_apps/2'. +-spec include_app_phase(Phase::load | start | stop, Application::atom()) -> true | false. +include_app_phase(stop, crypto) -> false; +include_app_phase(start, folsom) -> false; +include_app_phase(_Phase, _App) -> true. + + -endif. % TEST diff --git a/src/riak_kv_util.erl b/src/riak_kv_util.erl index f45ca514b8..384aa1fae5 100644 --- a/src/riak_kv_util.erl +++ b/src/riak_kv_util.erl @@ -32,8 +32,7 @@ expand_value/3, expand_rw_value/4, normalize_rw_value/2, - make_request/2, - mapred_system/0]). + make_request/2]). -include_lib("riak_kv_vnode.hrl"). @@ -145,29 +144,6 @@ normalize_rw_value(quorum, N) -> erlang:trunc((N/2)+1); normalize_rw_value(all, N) -> N; normalize_rw_value(_, _) -> error. -%% @doc Find out which MapReduce system should be used. Returns -%% `legacy' if `riak_client:mapred*' should be used. Returns -%% `pipe' if `riak_kv_mrc_pipe:mapred*' should be used. -%% -%% Depends on the `mapred_system' variable in the `riak_kv' -%% application's environment. --spec mapred_system() -> pipe | legacy. -mapred_system() -> - case app_helper:get_env(riak_kv, mapred_system, legacy) of - pipe -> pipe; - legacy -> legacy; - Other -> - error_logger:warning_msg( - "Unknown value for riak_kv:mapred_system:~n ~p~n" - "Defaulting to 'legacy'.", - [Other]), - %% override user's choice here so that warning doesn't - %% print repeatedly in the log - application:set_env(riak_kv, mapred_system, legacy), - legacy - end. - - %% =================================================================== %% EUnit tests %% =================================================================== diff --git a/src/riak_kv_vnode.erl b/src/riak_kv_vnode.erl index 8d8e0e3ece..c3cfc969ec 100644 --- a/src/riak_kv_vnode.erl +++ b/src/riak_kv_vnode.erl @@ -29,7 +29,6 @@ -export([test_vnode/1, put/7]). -export([start_vnode/1, get/3, - mget/3, del/3, put/6, coord_put/6, @@ -38,7 +37,10 @@ fold/3, get_vclocks/2, vnode_status/1, - ack_keys/1]). + ack_keys/1, + repair/1, + repair_status/1, + repair_filter/1]). 
%% riak_core_vnode API -export([init/1, @@ -94,9 +96,9 @@ reqid :: non_neg_integer(), bprops :: maybe_improper_list(), starttime :: non_neg_integer(), - prunetime :: undefined| non_neg_integer()}). - -%% TODO: add -specs to all public API funcs, this module seems fragile? + prunetime :: undefined| non_neg_integer(), + is_index=false :: boolean() %% set if the b/end supports indexes + }). %% API start_vnode(I) -> @@ -115,14 +117,6 @@ get(Preflist, BKey, ReqId) -> {fsm, undefined, self()}, riak_kv_vnode_master). -mget(Preflist, BKeys, ReqId) -> - Req = ?KV_MGET_REQ{bkeys=BKeys, - req_id=ReqId, - from={fsm, self()}}, - riak_core_vnode_master:command(Preflist, - Req, - riak_kv_vnode_master). - del(Preflist, BKey, ReqId) -> riak_core_vnode_master:command(Preflist, ?KV_DELETE_REQ{bkey=BKey, @@ -191,7 +185,7 @@ get_vclocks(Preflist, BKeyList) -> %% @doc Get status information about the node local vnodes. -spec vnode_status([{partition(), pid()}]) -> [{atom(), term()}]. vnode_status(PrefLists) -> - ReqId = erlang:phash2(erlang:now()), + ReqId = erlang:phash2({self(), os:timestamp()}), %% Get the status of each vnode riak_core_vnode_master:command(PrefLists, ?KV_VNODE_STATUS_REQ{}, @@ -199,6 +193,33 @@ vnode_status(PrefLists) -> riak_kv_vnode_master), wait_for_vnode_status_results(PrefLists, ReqId, []). +%% @doc Repair the given `Partition'. +-spec repair(partition()) -> + {ok, Pairs::[{partition(), node()}]} | + {down, Down::[{partition(), node()}]}. +repair(Partition) -> + Service = riak_kv, + MP = {riak_kv_vnode, Partition}, + FilterModFun = {?MODULE, repair_filter}, + riak_core_vnode_manager:repair(Service, MP, FilterModFun). + +%% @doc Get the status of the repair process for the given `Partition'. +-spec repair_status(partition()) -> no_repair | repair_in_progress. +repair_status(Partition) -> + riak_core_vnode_manager:repair_status({riak_kv_vnode, Partition}). + +%% @doc Given a `Target' partition generate a `Filter' fun to use +%% during partition repair. +-spec repair_filter(partition()) -> Filter::function(). +repair_filter(Target) -> + {ok, Ring} = riak_core_ring_manager:get_my_ring(), + riak_core_repair:gen_filter(Target, + Ring, + bucket_nval_map(Ring), + default_object_nval(), + fun object_info/1). 
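The new repair entry points are thin wrappers over riak_core_vnode_manager and riak_core_repair. A rough sketch of driving them from an attached console, using only the calls and return shapes in the specs above (`P' is a placeholder partition index owned by this node):

    %% Kick off a repair of one KV partition, then poll its progress.
    %% repair/1 returns {ok, Pairs} on success or {down, Down} if the
    %% source partitions are unavailable.
    {ok, _Pairs} = riak_kv_vnode:repair(P),
    repair_in_progress = riak_kv_vnode:repair_status(P).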
+ + %% VNode callbacks init([Index]) -> @@ -207,11 +228,11 @@ init([Index]) -> BucketBufSize = app_helper:get_env(riak_kv, bucket_buffer_size, 1000), IndexBufSize = app_helper:get_env(riak_kv, index_buffer_size, 100), KeyBufSize = app_helper:get_env(riak_kv, key_buffer_size, 100), + WorkerPoolSize = app_helper:get_env(riak_kv, worker_pool_size, 10), {ok, VId} = get_vnodeid(Index), DeleteMode = app_helper:get_env(riak_kv, delete_mode, 3000), AsyncFolding = app_helper:get_env(riak_kv, async_folds, true) == true, - case catch Mod:start(Index, [{async_folds, AsyncFolding}, - Configuration]) of + case catch Mod:start(Index, Configuration) of {ok, ModState} -> %% Get the backend capabilities State = #state{idx=Index, @@ -227,7 +248,7 @@ init([Index]) -> case AsyncFolding of true -> %% Create worker pool initialization tuple - FoldWorkerPool = {pool, riak_kv_worker, 10, []}, + FoldWorkerPool = {pool, riak_kv_worker, WorkerPoolSize, []}, {ok, State, [FoldWorkerPool]}; false -> {ok, State} @@ -251,18 +272,14 @@ handle_command(?KV_PUT_REQ{bkey=BKey, start_time=StartTime, options=Options}, Sender, State=#state{idx=Idx}) -> - riak_kv_mapred_cache:eject(BKey), + StartTS = os:timestamp(), riak_core_vnode:reply(Sender, {w, Idx, ReqId}), UpdState = do_put(Sender, BKey, Object, ReqId, StartTime, Options, State), + update_vnode_stats(vnode_put, Idx, StartTS), {noreply, UpdState}; handle_command(?KV_GET_REQ{bkey=BKey,req_id=ReqId},Sender,State) -> do_get(Sender, BKey, ReqId, State); -handle_command(?KV_MGET_REQ{bkeys=BKeys, req_id=ReqId, from=From}, _Sender, State) -> - do_mget(From, BKeys, ReqId, State); -handle_command(#riak_kv_listkeys_req_v1{bucket=Bucket, req_id=ReqId}, _Sender, - State=#state{mod=Mod, modstate=ModState, idx=Idx}) -> - do_legacy_list_bucket(ReqId,Bucket,Mod,ModState,Idx,State); handle_command(#riak_kv_listkeys_req_v2{bucket=Input, req_id=ReqId, caller=Caller}, _Sender, State=#state{async_folding=AsyncFolding, key_buf_size=BufferSize, @@ -407,68 +424,61 @@ handle_coverage(#riak_kv_listkeys_req_v3{bucket=Bucket, FilterVNodes, Sender, State) -> %% v3 == no backpressure ResultFun = result_fun(Bucket, Sender), - handle_coverage_listkeys(Bucket, ItemFilter, ResultFun, - FilterVNodes, Sender, State); + Opts = [{bucket, Bucket}], + handle_coverage_keyfold(Bucket, ItemFilter, ResultFun, + FilterVNodes, Sender, Opts, State); handle_coverage(?KV_LISTKEYS_REQ{bucket=Bucket, item_filter=ItemFilter}, FilterVNodes, Sender, State) -> %% v4 == ack-based backpressure ResultFun = result_fun_ack(Bucket, Sender), - handle_coverage_listkeys(Bucket, ItemFilter, ResultFun, - FilterVNodes, Sender, State); + Opts = [{bucket, Bucket}], + handle_coverage_keyfold(Bucket, ItemFilter, ResultFun, + FilterVNodes, Sender, Opts, State); +handle_coverage(#riak_kv_index_req_v1{bucket=Bucket, + item_filter=ItemFilter, + qry=Query}, + FilterVNodes, Sender, State) -> + %% v1 == no backpressure + handle_coverage_index(Bucket, ItemFilter, Query, + FilterVNodes, Sender, State, fun result_fun/2); handle_coverage(?KV_INDEX_REQ{bucket=Bucket, item_filter=ItemFilter, qry=Query}, - FilterVNodes, - Sender, - State=#state{async_folding=AsyncFolding, - idx=Index, - index_buf_size=BufferSize, - mod=Mod, - modstate=ModState}) -> - + FilterVNodes, Sender, State) -> + %% v2 = ack-based backpressure + handle_coverage_index(Bucket, ItemFilter, Query, + FilterVNodes, Sender, State, fun result_fun_ack/2). 
+ +handle_coverage_index(Bucket, ItemFilter, Query, + FilterVNodes, Sender, + State=#state{mod=Mod, + modstate=ModState}, + ResultFunFun) -> {ok, Capabilities} = Mod:capabilities(Bucket, ModState), IndexBackend = lists:member(indexes, Capabilities), - AsyncBackend = lists:member(async_fold, Capabilities), case IndexBackend of true -> %% Update stats... riak_kv_stat:update(vnode_index_read), - %% Construct the filter function - FilterVNode = proplists:get_value(Index, FilterVNodes), - Filter = riak_kv_coverage_filter:build_filter(Bucket, ItemFilter, FilterVNode), - BufferMod = riak_kv_fold_buffer, - Buffer = BufferMod:new(BufferSize, result_fun(Bucket, Sender)), - FoldFun = fold_fun(keys, BufferMod, Filter), - FinishFun = finish_fun(BufferMod, Sender), - case AsyncFolding andalso AsyncBackend of - true -> - Opts = [async_fold, - {index, Bucket, Query}, - {bucket, Bucket}]; - false -> - Opts = [{index, Bucket, Query}, - {bucket, Bucket}] - end, - case list(FoldFun, FinishFun, Mod, fold_keys, ModState, Opts, Buffer) of - {async, AsyncWork} -> - {async, {fold, AsyncWork, FinishFun}, Sender, State}; - _ -> - {noreply, State} - end; + ResultFun = ResultFunFun(Bucket, Sender), + Opts = [{index, Bucket, Query}, + {bucket, Bucket}], + handle_coverage_keyfold(Bucket, ItemFilter, ResultFun, + FilterVNodes, Sender, Opts, State); false -> {reply, {error, {indexes_not_supported, Mod}}, State} end. -%% Convenience for handling both v3 and v4 coverage-based listkeys -handle_coverage_listkeys(Bucket, ItemFilter, ResultFun, - FilterVNodes, Sender, - State=#state{async_folding=AsyncFolding, - idx=Index, - key_buf_size=BufferSize, - mod=Mod, - modstate=ModState}) -> +%% Convenience for handling both v3 and v4 coverage-based key fold operations +handle_coverage_keyfold(Bucket, ItemFilter, ResultFun, + FilterVNodes, Sender, Opts0, + State=#state{async_folding=AsyncFolding, + idx=Index, + key_buf_size=BufferSize, + mod=Mod, + modstate=ModState}) -> %% Construct the filter function FilterVNode = proplists:get_value(Index, FilterVNodes), Filter = riak_kv_coverage_filter:build_filter(Bucket, ItemFilter, FilterVNode), @@ -480,9 +490,9 @@ handle_coverage_listkeys(Bucket, ItemFilter, ResultFun, AsyncBackend = lists:member(async_fold, Capabilities), case AsyncFolding andalso AsyncBackend of true -> - Opts = [async_fold, {bucket, Bucket}]; + Opts = [async_fold | Opts0]; false -> - Opts = [{bucket, Bucket}] + Opts = Opts0 end, case list(FoldFun, FinishFun, Mod, fold_keys, ModState, Opts, Buffer) of {async, AsyncWork} -> @@ -608,15 +618,13 @@ do_put(Sender, {Bucket,_Key}=BKey, RObj, ReqID, StartTime, Options, State) -> {Reply, UpdState} = perform_put(PrepPutRes, State, UpdPutArgs), riak_core_vnode:reply(Sender, Reply), - update_index_write_stats(UpdPutArgs#putargs.index_specs), - riak_kv_stat:update(vnode_put), + update_index_write_stats(UpdPutArgs#putargs.is_index, UpdPutArgs#putargs.index_specs), UpdState. do_backend_delete(BKey, RObj, State = #state{mod = Mod, modstate = ModState}) -> %% object is a tombstone or all siblings are tombstones - riak_kv_mapred_cache:eject(BKey), - %% Calculate the index specs to remove... + %% Calculate the index specs to remove... %% JDM: This should just be a tombstone by this point, but better %% safe than sorry. 
IndexSpecs = riak_object:diff_index_specs(undefined, RObj), @@ -651,7 +659,7 @@ prepare_put(State=#state{vnodeid=VId, case LWW andalso not IndexBackend of true -> ObjToStore = riak_object:increment_vclock(RObj, VId, StartTime), - {{true, ObjToStore}, PutArgs}; + {{true, ObjToStore}, PutArgs#putargs{is_index = false}}; false -> prepare_put(State, PutArgs, IndexBackend) end. @@ -680,7 +688,7 @@ prepare_put(#state{vnodeid=VId, false -> RObj end, - {{true, ObjToStore}, PutArgs#putargs{index_specs=IndexSpecs}}; + {{true, ObjToStore}, PutArgs#putargs{index_specs=IndexSpecs, is_index=IndexBackend}}; {ok, Val, _UpdModState} -> OldObj = binary_to_term(Val), case put_merge(Coord, LWW, OldObj, RObj, VId, StartTime) of @@ -708,7 +716,7 @@ prepare_put(#state{vnodeid=VId, % BProps)) % end, {{true, AMObj}, - PutArgs#putargs{index_specs=IndexSpecs}} + PutArgs#putargs{index_specs=IndexSpecs, is_index=IndexBackend}} end end. @@ -808,23 +816,11 @@ put_merge(true, false, CurObj, UpdObj, VId, _StartTime) -> %% @private do_get(_Sender, BKey, ReqID, State=#state{idx=Idx,mod=Mod,modstate=ModState}) -> + StartTS = os:timestamp(), Retval = do_get_term(BKey, Mod, ModState), - riak_kv_stat:update(vnode_get), + update_vnode_stats(vnode_get, Idx, StartTS), {reply, {r, Retval, Idx, ReqID}, State}. -do_mget({fsm, Sender}, BKeys, ReqId, State=#state{idx=Idx, mod=Mod, modstate=ModState}) -> - F = fun(BKey) -> - R = do_get_term(BKey, Mod, ModState), - case R of - {ok, Obj} -> - gen_fsm:send_event(Sender, {r, Obj, Idx, ReqId}); - _ -> - gen_fsm:send_event(Sender, {r, {R, BKey}, Idx, ReqId}) - end, - riak_kv_stat:update(vnode_get) end, - [F(BKey) || BKey <- BKeys], - {noreply, State}. - %% @private do_get_term(BKey, Mod, ModState) -> case do_get_binary(BKey, Mod, ModState) of @@ -930,29 +926,6 @@ finish_fold(BufferMod, Buffer, Sender) -> BufferMod:flush(Buffer), riak_core_vnode:reply(Sender, done). -%% @private -%% @deprecated This function is only here to support -%% rolling upgrades and will be removed. -do_legacy_list_bucket(ReqID,'_',Mod,ModState,Idx,State) -> - FoldBucketsFun = - fun(Bucket, Buf) -> - [Bucket | Buf] - end, - RetVal = Mod:fold_buckets(FoldBucketsFun, [], [], ModState), - {reply, {kl, RetVal, Idx, ReqID}, State}; -do_legacy_list_bucket(ReqID,Bucket,Mod,ModState,Idx,State) -> - FoldKeysFun = - fun(_, Key, Buf) -> - [Key | Buf] - end, - Opts = [{bucket, Bucket}], - case Mod:fold_keys(FoldKeysFun, [], Opts, ModState) of - {ok, RetVal} -> - {reply, {kl, RetVal, Idx, ReqID}, State}; - {error, Reason} -> - {reply, {error, Reason, ReqID}, State} - end. 
- %% @private do_delete(BKey, ReqId, State) -> Mod = State#state.mod, @@ -974,7 +947,7 @@ do_delete(BKey, ReqId, State) -> UpdState = do_backend_delete(BKey, RObj, State), {reply, {del, Idx, ReqId}, UpdState}; Delay when is_integer(Delay) -> - erlang:send_after(Delay, self(), + erlang:send_after(Delay, self(), {final_delete, BKey, delete_hash(RObj)}), %% Nothing checks these messages - will just reply @@ -1029,7 +1002,9 @@ do_get_vclock({Bucket, Key}, Mod, ModState) -> %% upon receipt of a handoff datum, there is no client FSM do_diffobj_put({Bucket, Key}, DiffObj, _StateData=#state{mod=Mod, - modstate=ModState}) -> + modstate=ModState, + idx=Idx}) -> + StartTS = os:timestamp(), {ok, Capabilities} = Mod:capabilities(Bucket, ModState), IndexBackend = lists:member(indexes, Capabilities), case Mod:get(Bucket, Key, ModState) of @@ -1044,8 +1019,8 @@ do_diffobj_put({Bucket, Key}, DiffObj, Res = Mod:put(Bucket, Key, IndexSpecs, Val, ModState), case Res of {ok, _UpdModState} -> - update_index_write_stats(IndexSpecs), - riak_kv_stat:update(vnode_put); + update_index_write_stats(IndexBackend, IndexSpecs), + update_vnode_stats(vnode_put, Idx, StartTS); _ -> nop end, Res; @@ -1069,8 +1044,8 @@ do_diffobj_put({Bucket, Key}, DiffObj, Res = Mod:put(Bucket, Key, IndexSpecs, Val, ModState), case Res of {ok, _UpdModState} -> - update_index_write_stats(IndexSpecs), - riak_kv_stat:update(vnode_put); + update_index_write_stats(IndexBackend, IndexSpecs), + update_vnode_stats(vnode_put, Idx, StartTS); _ -> nop end, @@ -1177,9 +1152,16 @@ wait_for_vnode_status_results(PrefLists, ReqId, Acc) -> wait_for_vnode_status_results(PrefLists, ReqId, Acc) end. +%% @private +-spec update_vnode_stats(vnode_get | vnode_put, partition(), erlang:timestamp()) -> + ok. +update_vnode_stats(Op, Idx, StartTS) -> + riak_kv_stat:update({Op, Idx, timer:now_diff( os:timestamp(), StartTS)}). %% @private -update_index_write_stats(IndexSpecs) -> +update_index_write_stats(false, _IndexSpecs) -> + ok; +update_index_write_stats(true, IndexSpecs) -> {Added, Removed} = count_index_specs(IndexSpecs), riak_kv_stat:update({vnode_index_write, Added, Removed}). @@ -1200,6 +1182,20 @@ count_index_specs(IndexSpecs) -> end, lists:foldl(F, {0, 0}, IndexSpecs). +%% @private +bucket_nval_map(Ring) -> + [{riak_core_bucket:name(B), riak_core_bucket:n_val(B)} || + B <- riak_core_bucket:get_buckets(Ring)]. + +%% @private +default_object_nval() -> + riak_core_bucket:n_val(riak_core_config:default_bucket_props()). + +%% @private +object_info({Bucket, _Key}=BKey) -> + Hash = riak_core_util:chash_key(BKey), + {Bucket, Hash}. + -ifdef(TEST). @@ -1247,8 +1243,9 @@ assign_vnodeid_restart_earlier_ts_test() -> vnode_status_test_() -> {setup, fun() -> - os:cmd("chmod u+rwx kv_vnode_status_test"), - os:cmd("rm -rf kv_vnode_status_test"), + filelib:ensure_dir("kv_vnode_status_test/.test"), + ?cmd("chmod u+rwx kv_vnode_status_test"), + ?cmd("rm -rf kv_vnode_status_test"), application:set_env(riak_kv, vnode_status, "kv_vnode_status_test"), ok end, @@ -1331,17 +1328,19 @@ backend_with_known_key(BackendMod) -> S1), {S2, B, K}. -must_be_first_setup_stuff_test() -> - application:start(sasl), - erlang:put({?MODULE, kv}, application:get_all_env(riak_kv)). 
- list_buckets_test_() -> {foreach, fun() -> application:start(sasl), - application:get_all_env(riak_kv) + Env = application:get_all_env(riak_kv), + application:start(folsom), + riak_core_stat_cache:start_link(), + riak_kv_stat:register_stats(), + Env end, fun(Env) -> + riak_core_stat_cache:stop(), + application:stop(folsom), application:stop(sasl), [application:unset_env(riak_kv, K) || {K, _V} <- application:get_all_env(riak_kv)], @@ -1411,11 +1410,6 @@ filter_keys_test() -> flush_msgs(). -must_be_last_cleanup_stuff_test() -> - [application:unset_env(riak_kv, K) || - {K, _V} <- application:get_all_env(riak_kv)], - [application:set_env(riak_kv, K, V) || {K, V} <- erlang:get({?MODULE, kv})]. - new_result_listener(Type) -> case Type of buckets -> diff --git a/src/riak_kv_wm_buckets.erl b/src/riak_kv_wm_buckets.erl index 4f05a7cacf..a4942f0711 100644 --- a/src/riak_kv_wm_buckets.erl +++ b/src/riak_kv_wm_buckets.erl @@ -35,6 +35,7 @@ -export([ init/1, service_available/2, + forbidden/2, content_types_provided/2, encodings_provided/2, produce_bucket_list/2 @@ -87,6 +88,8 @@ service_available(RD, Ctx=#ctx{riak=RiakProps}) -> Ctx} end. +forbidden(RD, Ctx) -> + {riak_kv_wm_utils:is_forbidden(RD), RD, Ctx}. %% @spec content_types_provided(reqdata(), context()) -> %% {[{ContentType::string(), Producer::atom()}], reqdata(), context()} diff --git a/src/riak_kv_wm_index.erl b/src/riak_kv_wm_index.erl index f992400ed9..5b22eca4cf 100644 --- a/src/riak_kv_wm_index.erl +++ b/src/riak_kv_wm_index.erl @@ -33,6 +33,7 @@ -export([ init/1, service_available/2, + forbidden/2, malformed_request/2, content_types_provided/2, encodings_provided/2, @@ -74,6 +75,9 @@ service_available(RD, Ctx=#ctx{riak=RiakProps}) -> Ctx} end. +forbidden(RD, Ctx) -> + {riak_kv_wm_utils:is_forbidden(RD), RD, Ctx}. + %% @spec malformed_request(reqdata(), context()) -> %% {boolean(), reqdata(), context()} %% @doc Determine whether query parameters are badly-formed. @@ -86,7 +90,7 @@ malformed_request(RD, Ctx) -> Args1 = wrq:path_tokens(RD), Args2 = [list_to_binary(riak_kv_wm_utils:maybe_decode_uri(RD, X)) || X <- Args1], - case to_index_query(IndexField, Args2) of + case riak_index:to_index_query(IndexField, Args2) of {ok, Query} -> %% Request is valid. NewCtx = Ctx#ctx{ @@ -139,37 +143,3 @@ produce_index_results(RD, Ctx) -> {{error, Reason}, RD, Ctx} end. - -%% @private -%% @spec to_index_op_query(binary(), [binary()]) -> -%% {ok, {atom(), binary(), list(binary())}} | {error, Reasons}. -%% @doc Given an IndexOp, IndexName, and Args, construct and return a -%% valid query, or a list of errors if the query is malformed. -to_index_query(IndexField, Args) -> - %% Normalize the index field... - IndexField1 = riak_index:normalize_index_field(IndexField), - - %% Normalize the arguments... - case riak_index:parse_fields([{IndexField1, X} || X <- Args]) of - {ok, []} -> - {error, {too_few_arguments, Args}}; - - {ok, [{_, Value}]} -> - %% One argument == exact match query - {ok, {eq, IndexField1, Value}}; - - {ok, [{_, Start}, {_, End}]} -> - %% Two arguments == range query - case End > Start of - true -> - {ok, {range, IndexField1, Start, End}}; - false -> - {error, {invalid_range, Args}} - end; - - {ok, _} -> - {error, {too_many_arguments, Args}}; - - {error, FailureReasons} -> - {error, FailureReasons} - end. 
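With the resource-local `to_index_query/2' removed, riak_kv_wm_index now delegates to `riak_index:to_index_query/2'; the result shapes match the deleted clauses above. A sketch (field names invented, and it assumes `riak_index:parse_fields/1' still coerces `_int' field arguments to integers as the removed code relied on):

    %% One argument yields an exact-match query; two yield a range query.
    {ok, {eq, <<"field1_bin">>, <<"val1">>}} =
        riak_index:to_index_query(<<"field1_bin">>, [<<"val1">>]),
    {ok, {range, <<"age_int">>, 18, 32}} =
        riak_index:to_index_query(<<"age_int">>, [<<"18">>, <<"32">>]).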
diff --git a/src/riak_kv_wm_keylist.erl b/src/riak_kv_wm_keylist.erl index 76b4f96949..7894951f60 100644 --- a/src/riak_kv_wm_keylist.erl +++ b/src/riak_kv_wm_keylist.erl @@ -45,6 +45,7 @@ -export([ init/1, service_available/2, + forbidden/2, content_types_provided/2, encodings_provided/2, produce_bucket_body/2 @@ -100,6 +101,8 @@ service_available(RD, Ctx=#ctx{riak=RiakProps}) -> Ctx} end. +forbidden(RD, Ctx) -> + {riak_kv_wm_utils:is_forbidden(RD), RD, Ctx}. %% @spec content_types_provided(reqdata(), context()) -> diff --git a/src/riak_kv_wm_link_walker.erl b/src/riak_kv_wm_link_walker.erl index 64f1387e4a..a8b6649c5b 100644 --- a/src/riak_kv_wm_link_walker.erl +++ b/src/riak_kv_wm_link_walker.erl @@ -122,6 +122,7 @@ init/1, malformed_request/2, service_available/2, + forbidden/2, allowed_methods/2, content_types_provided/2, resource_exists/2, @@ -250,6 +251,9 @@ service_available(RD, Ctx=#ctx{riak=RiakProps}) -> Ctx} end. +forbidden(RD, Ctx) -> + {riak_kv_wm_utils:is_forbidden(RD), RD, Ctx}. + %% @spec allowed_methods(reqdata(), context()) -> %% {[method()], reqdata(), context()} %% @doc Get the list of methods this resource supports. @@ -287,8 +291,8 @@ resource_exists(RD, Ctx=#ctx{bucket=B, key=K, client=C}) -> %% @doc Execute the link walking query, and build the response body. %% This function has to explicitly set the Content-Type header, %% because Webmachine doesn't know to add the "boundary" parameter to it. -to_multipart_mixed(RD, Ctx=#ctx{linkquery=Query, start=Start, client=C}) -> - Results = execute_query(C, [Start], Query), +to_multipart_mixed(RD, Ctx=#ctx{linkquery=Query, start=Start}) -> + Results = execute_query([Start], Query), Boundary = riak_core_util:unique_id_62(), {multipart_mixed_encode(Results, Boundary, Ctx), %% reset content-type now that we now what it is @@ -297,7 +301,7 @@ to_multipart_mixed(RD, Ctx=#ctx{linkquery=Query, start=Start, client=C}) -> RD), Ctx}. -%% @spec execute_query(riak_client(), [riak_object()], [linkquery()]) -> +%% @spec execute_query([riak_object()], [linkquery()]) -> %% [[riak_object()]] %% @type linkquery() = {Bucket::binary()|'_', Tag::binary()|'_', Acc::boolean()} %% @doc Execute the link query. Return a list of link step results, @@ -307,36 +311,31 @@ to_multipart_mixed(RD, Ctx=#ctx{linkquery=Query, start=Start, client=C}) -> %% This function chops up the list of steps into segments of contiguous %% Acc==false steps. Acc==true requires an end to a map/reduce query in %% order to package up the results of that step for delivery to the client. -execute_query(_, _, []) -> []; -execute_query(C, StartObjects, [{Bucket, Tag, Acc}|RestQuery]) -> +execute_query(_, []) -> []; +execute_query(StartObjects, [{Bucket, Tag, Acc}|RestQuery]) -> StartLinks = lists:append([links(O, Bucket, Tag) || O <- StartObjects]), {SegResults,Leftover} = if Acc -> - {execute_segment(C, StartLinks, []), RestQuery}; + {execute_segment(StartLinks, []), RestQuery}; true -> {SafeQuery, [LastSafe|UnsafeQuery]} = lists:splitwith(fun({_,_,SegAcc}) -> not SegAcc end, RestQuery), - {execute_segment(C, StartLinks,SafeQuery++[LastSafe]), + {execute_segment(StartLinks,SafeQuery++[LastSafe]), UnsafeQuery} end, - [SegResults|execute_query(C,SegResults,Leftover)]. + [SegResults|execute_query(SegResults,Leftover)]. -%% @spec execute_segment(riak_client, [bkeytag()], [linkquery()]) -> +%% @spec execute_segment([bkeytag()], [linkquery()]) -> %% [riak_object()] %% @doc Execute a string of link steps, where only the last step's %% result will be kept for later. 
-execute_segment(C, Start, Steps) -> +execute_segment(Start, Steps) -> MR = [{link, Bucket, Key, false} || {Bucket, Key, _} <- Steps] ++[riak_kv_mapreduce:reduce_set_union(false), riak_kv_mapreduce:map_identity(true)], - case riak_kv_util:mapred_system() of - pipe -> - {ok, Objects} = riak_kv_mrc_pipe:mapred(Start, MR); - legacy -> - {ok, Objects} = C:mapred(Start, MR) - end, + {ok, Objects} = riak_kv_mrc_pipe:mapred(Start, MR), %% remove notfounds and strip link tags from objects lists:reverse( lists:foldl(fun({error, notfound}, Acc) -> Acc; diff --git a/src/riak_kv_wm_mapred.erl b/src/riak_kv_wm_mapred.erl index 4389f251f6..76037f2876 100644 --- a/src/riak_kv_wm_mapred.erl +++ b/src/riak_kv_wm_mapred.erl @@ -24,49 +24,37 @@ -module(riak_kv_wm_mapred). --export([init/1, service_available/2, allowed_methods/2]). +-export([init/1,allowed_methods/2, known_content_type/2, forbidden/2]). -export([malformed_request/2, process_post/2, content_types_provided/2]). -export([nop/2]). -include_lib("webmachine/include/webmachine.hrl"). -include_lib("riak_pipe/include/riak_pipe.hrl"). +-include("riak_kv_mrc_sink.hrl"). +-define(MAPRED_CTYPE, "application/json"). -define(DEFAULT_TIMEOUT, 60000). --record(state, {client, inputs, timeout, mrquery, boundary}). +-record(state, {inputs, timeout, mrquery, boundary}). +-type state() :: #state{}. init(_) -> - {ok, undefined}. + {ok, #state{}}. -service_available(RD, State) -> - case riak:local_client() of - {ok, Client} -> - {true, RD, #state{client=Client}}; - Error -> - lager:error("~s", Error), - {false, RD, State} - end. +forbidden(RD, State) -> + {riak_kv_wm_utils:is_forbidden(RD), RD, State}. allowed_methods(RD, State) -> {['GET','HEAD','POST'], RD, State}. +-spec known_content_type(wrq:reqdata(), state()) -> + {boolean(), wrq:reqdata(), state()}. +known_content_type(RD, State) -> + {ctype_ok(RD), RD, State}. + malformed_request(RD, State) -> - {Verified, Message, NewState} = - case {wrq:method(RD), wrq:req_body(RD)} of - {'POST', Body} when Body /= undefined -> - verify_body(Body, State); - _ -> - {false, usage(), State} - end, - {not Verified, - if Verified -> RD; - true -> - wrq:set_resp_header( - "Content-Type", "text/plain", - wrq:set_resp_body(Message, RD)) - end, - NewState}. + check_body(RD, State). content_types_provided(RD, State) -> {[{"application/json", nop}], RD, State}. @@ -75,12 +63,7 @@ nop(RD, State) -> {usage(), RD, State}. process_post(RD, State) -> - case riak_kv_util:mapred_system() of - pipe -> - pipe_mapred(RD, State); - legacy -> - legacy_mapred(RD, State) - end. + pipe_mapred(RD, State). %% Internal functions send_error(Error, RD) -> @@ -96,6 +79,47 @@ format_error({error, Error}) when is_list(Error) -> format_error(_Error) -> mochijson2:encode({struct, [{error, map_reduce_error}]}). +-spec ctype_ok(wrq:reqdata()) -> boolean(). +%% @doc Return true if the content type from +%% this request is appropriate. +ctype_ok(RD) -> + valid_ctype(get_base_ctype(RD)). + +-spec get_base_ctype(wrq:reqdata()) -> string(). +%% @doc Return the "base" content-type, that +%% is, not including the subtype parameters +get_base_ctype(RD) -> + base_type(wrq:get_req_header("content-type", RD)). + +-spec base_type(string()) -> string(). +%% @doc Return the base media type +base_type(CType) -> + {BaseType, _SubTypeParameters} = mochiweb_util:parse_header(CType), + BaseType. + +-spec valid_ctype(string()) -> boolean(). 
+%% @doc Return true if the base content type
+%% is equivalent to ?MAPRED_CTYPE
+valid_ctype(?MAPRED_CTYPE) -> true;
+valid_ctype(_Ctype) -> false.
+
+check_body(RD, State) ->
+    {Verified, Message, NewState} =
+        case {wrq:method(RD), wrq:req_body(RD)} of
+            {'POST', Body} when Body /= undefined ->
+                verify_body(Body, State);
+            _ ->
+                {false, usage(), State}
+        end,
+    {not Verified,
+     if Verified -> RD;
+        true ->
+             wrq:set_resp_header(
+               "Content-Type", "text/plain",
+               wrq:set_resp_body(Message, RD))
+     end,
+     NewState}.
+
 verify_body(Body, State) ->
     case riak_kv_mapred_json:parse_request(Body) of
         {ok, ParsedInputs, ParsedQuery, Timeout} ->
@@ -130,44 +154,33 @@ usage() ->
     "     \"query\":[...list of map/reduce phases...]\n"
     "}\n".
 
-is_key_filter({Bucket, Filters}) when is_binary(Bucket),
-                                      is_list(Filters) ->
-    true;
-is_key_filter(_) ->
-    false.
-
 %% PIPE MAPRED
 
 pipe_mapred(RD,
             #state{inputs=Inputs,
                    mrquery=Query,
                    timeout=Timeout}=State) ->
-    try riak_kv_mrc_pipe:mapred_stream(Query) of
-        {{ok, Pipe}, NumKeeps} ->
-            PipeRef = (Pipe#pipe.sink)#fitting.ref,
-            erlang:send_after(Timeout, self(), {pipe_timeout, PipeRef}),
-            {InputSender, SenderMonitor} =
-                riak_kv_mrc_pipe:send_inputs_async(Pipe, Inputs),
+    case riak_kv_mrc_pipe:mapred_stream_sink(Inputs, Query, Timeout) of
+        {ok, Mrc} ->
            case wrq:get_qs_value("chunked", "false", RD) of
                "true" ->
-                    pipe_mapred_chunked(RD, State, Pipe,
-                                        {InputSender, SenderMonitor});
+                    pipe_mapred_chunked(RD, State, Mrc);
                _ ->
-                    pipe_mapred_nonchunked(RD, State, Pipe, NumKeeps,
-                                           {InputSender, SenderMonitor})
-            end
-    catch throw:{badarg, Fitting, Reason} ->
+                    pipe_mapred_nonchunked(RD, State, Mrc)
+            end;
+        {error, {Fitting, Reason}} ->
            {{halt, 400},
             send_error({error, [{phase, Fitting},
                                 {error, iolist_to_binary(Reason)}]}, RD),
             State}
    end.
+
+pipe_mapred_nonchunked(RD, State, Mrc) ->
+    case riak_kv_mrc_pipe:collect_sink(Mrc) of
        {ok, Results} ->
            JSONResults =
-                case NumKeeps < 2 of
+                case Mrc#mrc_ctx.keeps < 2 of
                    true ->
                        [riak_kv_mapred_json:jsonify_not_found(R)
                         || R <- Results];
@@ -178,68 +191,29 @@ pipe_mapred_nonchunked(RD, State, Pipe, NumKeeps, Sender) ->
            end,
            HasMRQuery = State#state.mrquery /= [],
            JSONResults1 = riak_kv_mapred_json:jsonify_bkeys(JSONResults, HasMRQuery),
+            riak_kv_mrc_pipe:cleanup_sink(Mrc),
            {true,
             wrq:set_resp_body(mochijson2:encode(JSONResults1), RD),
             State};
-        {error, {sender_error, Error}} ->
+        {error, {sender_died, Error}} ->
            %% the sender links to the builder, so the builder has
            %% already been torn down
-            prevent_keepalive(),
+            riak_kv_mrc_pipe:cleanup_sink(Mrc),
+            {{halt, 500}, send_error(Error, RD), State};
+        {error, {sink_died, Error}} ->
+            %% pipe monitors the sink, so the sink death has already
+            %% destroyed the pipe
+            riak_kv_mrc_pipe:cleanup_sink(Mrc),
            {{halt, 500}, send_error(Error, RD), State};
        {error, timeout} ->
-            %% destroying the pipe will tear down the linked sender
-            riak_pipe:destroy(Pipe),
-            prevent_keepalive(),
+            riak_kv_mrc_pipe:destroy_sink(Mrc),
            {{halt, 500}, send_error({error, timeout}, RD), State};
-        {error, Error} ->
-            riak_pipe:destroy(Pipe),
-            prevent_keepalive(),
+        {error, {_From, {Error, _Input}}} ->
+            riak_kv_mrc_pipe:destroy_sink(Mrc),
            {{halt, 500}, send_error({error, Error}, RD), State}
    end.
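The non-chunked path now reduces to a short, linear sequence. As a rough sketch using only the calls visible above (the `run_mapred' wrapper name is invented, and the per-error choice between cleanup_sink and destroy_sink is collapsed for brevity):

    run_mapred(Inputs, Query, Timeout) ->
        {ok, Mrc} = riak_kv_mrc_pipe:mapred_stream_sink(Inputs, Query, Timeout),
        case riak_kv_mrc_pipe:collect_sink(Mrc) of
            {ok, Results} ->
                %% normal completion: release the sink, keep the results
                riak_kv_mrc_pipe:cleanup_sink(Mrc),
                {ok, Results};
            {error, _} = Error ->
                %% failure: tear the pipe down
                riak_kv_mrc_pipe:destroy_sink(Mrc),
                Error
        end.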
-pipe_collect_outputs(Pipe, NumKeeps, Sender) -> - Ref = (Pipe#pipe.sink)#fitting.ref, - case pipe_collect_outputs1(Ref, Sender, []) of - {ok, Outputs} -> - {ok, riak_kv_mrc_pipe:group_outputs(Outputs, NumKeeps)}; - Error -> - Error - end. - -pipe_collect_outputs1(Ref, Sender, Acc) -> - case pipe_receive_output(Ref, Sender) of - {ok, Output} -> pipe_collect_outputs1(Ref, Sender, [Output|Acc]); - eoi -> {ok, lists:reverse(Acc)}; - Error -> Error - end. - -pipe_receive_output(Ref, {SenderPid, SenderRef}) -> - receive - #pipe_eoi{ref=Ref} -> - eoi; - #pipe_result{ref=Ref, from=From, result=Result} -> - {ok, {From, Result}}; - #pipe_log{ref=Ref, from=From, msg=Msg} -> - case Msg of - {trace, [error], {error, Info}} -> - {error, riak_kv_mapred_json:jsonify_pipe_error( - From, Info)}; - _ -> - %% not a log message we're interested in - pipe_receive_output(Ref, {SenderPid, SenderRef}) - end; - {'DOWN', SenderRef, process, SenderPid, Reason} -> - if Reason == normal -> - %% just done sending inputs, nothing to worry about - pipe_receive_output(Ref, {SenderPid, SenderRef}); - true -> - {error, {sender_error, Reason}} - end; - {pipe_timeout, Ref} -> - {error, timeout} - end. - -pipe_mapred_chunked(RD, State, Pipe, Sender) -> +pipe_mapred_chunked(RD, State, Mrc) -> Boundary = riak_core_util:unique_id_62(), CTypeRD = wrq:set_resp_header( "Content-Type", @@ -247,134 +221,62 @@ pipe_mapred_chunked(RD, State, Pipe, Sender) -> RD), BoundaryState = State#state{boundary=Boundary}, Streamer = pipe_stream_mapred_results( - CTypeRD, Pipe, BoundaryState, Sender), + CTypeRD, BoundaryState, Mrc), {true, wrq:set_resp_body({stream, Streamer}, CTypeRD), BoundaryState}. -pipe_stream_mapred_results(RD, Pipe, +pipe_stream_mapred_results(RD, #state{boundary=Boundary}=State, - Sender) -> - case pipe_receive_output((Pipe#pipe.sink)#fitting.ref, Sender) of - {ok, {PhaseId, Result}} -> - %% results come out of pipe one - %% at a time but they're supposed to - %% be in a list at the client end - JSONResults = [riak_kv_mapred_json:jsonify_not_found(Result)], - HasMRQuery = State#state.mrquery /= [], - JSONResults1 = riak_kv_mapred_json:jsonify_bkeys(JSONResults, HasMRQuery), - Data = mochijson2:encode({struct, [{phase, PhaseId}, - {data, JSONResults1}]}), - Body = ["\r\n--", Boundary, "\r\n", - "Content-Type: application/json\r\n\r\n", - Data], - {iolist_to_binary(Body), - fun() -> pipe_stream_mapred_results(RD, Pipe, State, Sender) end}; - eoi -> - {iolist_to_binary(["\r\n--", Boundary, "--\r\n"]), done}; - {error, timeout} -> - riak_pipe:destroy(Pipe), - prevent_keepalive(), + Mrc) -> + case riak_kv_mrc_pipe:receive_sink(Mrc) of + {ok, Done, Outputs} -> + BodyA = case Outputs of + [] -> + []; + _ -> + HasMRQuery = State#state.mrquery /= [], + [ result_part(O, HasMRQuery, Boundary) + || O <- Outputs ] + end, + {BodyB,Next} = case Done of + true -> + riak_kv_mrc_pipe:cleanup_sink(Mrc), + {iolist_to_binary( + ["\r\n--", Boundary, "--\r\n"]), + done}; + false -> + {[], + fun() -> pipe_stream_mapred_results( + RD, State, Mrc) + end} + end, + {iolist_to_binary([BodyA,BodyB]), Next}; + {error, timeout, _} -> + riak_kv_mrc_pipe:destroy_sink(Mrc), {format_error({error, timeout}), done}; - {error, {sender_error, Error}} -> - prevent_keepalive(), + {error, {sender_died, Error}, _} -> + %% sender links to the builder, so the builder death has + %% already destroyed the pipe + riak_kv_mrc_pipe:cleanup_sink(Mrc), {format_error(Error), done}; - {error, {Error, _Input}} -> - riak_pipe:destroy(Pipe), - prevent_keepalive(), + {error, 
{sink_died, Error}, _} ->
+            %% pipe monitors the sink, so the sink death has already
+            %% destroyed the pipe
+            riak_kv_mrc_pipe:cleanup_sink(Mrc),
+            {format_error(Error), done};
+        {error, {_From, {Error, _Input}}, _} ->
+            riak_kv_mrc_pipe:destroy_sink(Mrc),
             {format_error({error, Error}), done}
     end.
 
-%% @doc Prevent this socket from being used for another HTTP request.
-%%      This is used to workaround an issue in mochiweb, where the loop
-%%      waiting for new TCP data receives a latent pipe message instead,
-%%      and blows up, sending a 400 to the requester.
-%%
-%%      WARNING: This uses an undocumented feature of mochiweb that exists
-%%      in 1.5.1 (the version planned to ship with Riak 1.0). The feature
-%%      appears to still exist in mochiweb 2.2.1, but it may go away in
-%%      future mochiweb releases.
-%%
-%%      See [https://issues.basho.com/1222] for more details.
-prevent_keepalive() ->
-    erlang:put(mochiweb_request_force_close, true).
-
-%% LEGACY MAPRED
-
-legacy_mapred(RD,
-              #state{inputs=Inputs,
-                     mrquery=Query,
-                     timeout=Timeout}=State) ->
-    Me = self(),
-    {ok, Client} = riak:local_client(),
-    ResultTransformer = fun riak_kv_mapred_json:jsonify_not_found/1,
-    case wrq:get_qs_value("chunked", RD) of
-        "true" ->
-            {ok, ReqId} =
-                case is_binary(Inputs) orelse is_key_filter(Inputs) of
-                    true ->
-                        Client:mapred_bucket_stream(Inputs, Query, Me, ResultTransformer, Timeout);
-                    false ->
-                        if is_list(Inputs) ->
-                                {ok, {RId, FSM}} = Client:mapred_stream(Query, Me, ResultTransformer, Timeout),
-                                luke_flow:add_inputs(FSM, Inputs),
-                                luke_flow:finish_inputs(FSM),
-                                {ok, RId};
-                           is_tuple(Inputs) ->
-                                {ok, {RId, FSM}} = Client:mapred_stream(Query, Me, ResultTransformer, Timeout),
-                                Client:mapred_dynamic_inputs_stream(FSM, Inputs, Timeout),
-                                luke_flow:finish_inputs(FSM),
-                                {ok, RId}
-                        end
-                end,
-            Boundary = riak_core_util:unique_id_62(),
-            RD1 = wrq:set_resp_header("Content-Type", "multipart/mixed;boundary=" ++ Boundary, RD),
-            State1 = State#state{boundary=Boundary},
-            {true, wrq:set_resp_body({stream, legacy_stream_mapred_results(RD1, ReqId, State1)}, RD1), State1};
-        Param when Param =:= "false";
-                   Param =:= undefined ->
-            Results = case is_binary(Inputs) orelse is_key_filter(Inputs) of
-                          true ->
-                              Client:mapred_bucket(Inputs, Query, ResultTransformer, Timeout);
-                          false ->
-                              if is_list(Inputs) ->
-                                      Client:mapred(Inputs, Query, ResultTransformer, Timeout);
-                                 is_tuple(Inputs) ->
-                                      case Client:mapred_stream(Query,Me,ResultTransformer,Timeout) of
-                                          {ok, {ReqId, FlowPid}} ->
-                                              Client:mapred_dynamic_inputs_stream(FlowPid, Inputs, Timeout),
-                                              luke_flow:finish_inputs(FlowPid),
-                                              luke_flow:collect_output(ReqId, Timeout);
-                                          Error ->
-                                              Error
-                                      end
-                              end
-                      end,
-            RD1 = wrq:set_resp_header("Content-Type", "application/json", RD),
-            case Results of
-                "all nodes failed" ->
-                    {{halt, 500}, wrq:set_resp_body("All nodes failed", RD), State};
-                {error, _} ->
-                    {{halt, 500}, send_error(Results, RD1), State};
-                {ok, Result} ->
-                    {true, wrq:set_resp_body(mochijson2:encode(Result), RD1), State}
-            end
-    end.
- -legacy_stream_mapred_results(RD, ReqId, #state{timeout=Timeout}=State) -> - FinalTimeout = erlang:trunc(Timeout * 1.02), - receive - {flow_results, ReqId, done} -> {iolist_to_binary(["\r\n--", State#state.boundary, "--\r\n"]), done}; - {flow_results, ReqId, {error, Error}} -> - {format_error(Error), done}; - {flow_error, ReqId, Error} -> - {format_error({error, Error}), done}; - {flow_results, PhaseId, ReqId, Res} -> - Data = mochijson2:encode({struct, [{phase, PhaseId}, {data, Res}]}), - Body = ["\r\n--", State#state.boundary, "\r\n", - "Content-Type: application/json\r\n\r\n", - Data], - {iolist_to_binary(Body), fun() -> legacy_stream_mapred_results(RD, ReqId, State) end} - after FinalTimeout -> - {format_error({error, timeout}), done} - end. +result_part({PhaseId, Results}, HasMRQuery, Boundary) -> + Data = riak_kv_mapred_json:jsonify_bkeys( + [riak_kv_mapred_json:jsonify_not_found(R) + || R <- Results ], + HasMRQuery), + JSON = {struct, [{phase, PhaseId}, + {data, Data}]}, + ["\r\n--", Boundary, "\r\n", + "Content-Type: application/json\r\n\r\n", + mochijson2:encode(JSON)]. diff --git a/src/riak_kv_wm_object.erl b/src/riak_kv_wm_object.erl index 884c1bcdef..cefe076efe 100644 --- a/src/riak_kv_wm_object.erl +++ b/src/riak_kv_wm_object.erl @@ -99,6 +99,7 @@ -export([ init/1, service_available/2, + forbidden/2, allowed_methods/2, allow_missing_post/2, malformed_request/2, @@ -189,6 +190,9 @@ service_available(RD, Ctx=#ctx{riak=RiakProps}) -> Ctx} end. +forbidden(RD, Ctx) -> + {riak_kv_wm_utils:is_forbidden(RD), RD, Ctx}. + %% @spec allowed_methods(reqdata(), context()) -> %% {[method()], reqdata(), context()} %% @doc Get the list of methods this resource supports. @@ -605,7 +609,8 @@ accept_doc_body(RD, Ctx=#ctx{bucket=B, key=K, client=C, links=L, index_fields=IF %% Handle the no-sibling case. Just send the object. send_returnbody(RD, DocCtx, _HasSiblings = false) -> {Body, DocRD, DocCtx2} = produce_doc_body(RD, DocCtx), - {true, wrq:append_to_response_body(Body, DocRD), DocCtx2}; + {DocRD2, DocCtx3} = add_conditional_headers(DocRD, DocCtx2), + {true, wrq:append_to_response_body(Body, DocRD2), DocCtx3}; %% Handle the sibling case. Send either the sibling message body, or a %% multipart body, depending on what the client accepts. @@ -614,12 +619,25 @@ send_returnbody(RD, DocCtx, _HasSiblings = true) -> case webmachine_util:choose_media_type(["multipart/mixed", "text/plain"], AcceptHdr) of "multipart/mixed" -> {Body, DocRD, DocCtx2} = produce_multipart_body(RD, DocCtx), - {true, wrq:append_to_response_body(Body, DocRD), DocCtx2}; + {DocRD2, DocCtx3} = add_conditional_headers(DocRD, DocCtx2), + {true, wrq:append_to_response_body(Body, DocRD2), DocCtx3}; _ -> {Body, DocRD, DocCtx2} = produce_sibling_message_body(RD, DocCtx), - {true, wrq:append_to_response_body(Body, DocRD), DocCtx2} + {DocRD2, DocCtx3} = add_conditional_headers(DocRD, DocCtx2), + {true, wrq:append_to_response_body(Body, DocRD2), DocCtx3} end. +%% Add ETag and Last-Modified headers to responses that might not +%% necessarily include them, specifically when the client requests +%% returnbody on a PUT or POST. +add_conditional_headers(RD, Ctx) -> + {ETag, RD2, Ctx2} = generate_etag(RD, Ctx), + {LM, RD3, Ctx3} = last_modified(RD2, Ctx2), + RD4 = wrq:set_resp_header("ETag", webmachine_util:quoted_string(ETag), RD3), + RD5 = wrq:set_resp_header("Last-Modified", + httpd_util:rfc1123_date(calendar:universal_time_to_local_time(LM)), RD4), + {RD5,Ctx3}. 
+
 %% @spec extract_content_type(reqdata()) ->
 %%          {ContentType::string(), Charset::string()|undefined}
 %% @doc Interpret the Content-Type header in the client's PUT request.
@@ -839,35 +857,45 @@ delete_resource(RD, Ctx=#ctx{bucket=B, key=K, client=C, rw=RW, r=R, w=W,
 %% @spec generate_etag(reqdata(), context()) ->
 %%          {undefined|string(), reqdata(), context()}
 %% @doc Get the etag for this resource.
-%%      Documents will have an etag equal to their vtag. No etag will be
-%%      given for documents with siblings, if no sibling was chosen with the
-%%      vtag query param.
+%%      Documents will have an etag equal to their vtag. For documents with
+%%      siblings when no vtag is specified, this will be an etag derived from
+%%      the vector clock.
 generate_etag(RD, Ctx) ->
     case select_doc(Ctx) of
         {MD, _} ->
             {dict:fetch(?MD_VTAG, MD), RD, Ctx};
         multiple_choices ->
-            {undefined, RD, Ctx}
+            {ok, Doc} = Ctx#ctx.doc,
+            <<ETag:128/integer>> = crypto:md5(term_to_binary(riak_object:vclock(Doc))),
+            {riak_core_util:integer_to_list(ETag, 62), RD, Ctx}
     end.
 
 %% @spec last_modified(reqdata(), context()) ->
 %%          {undefined|datetime(), reqdata(), context()}
 %% @doc Get the last-modified time for this resource.
 %%      Documents will have the last-modified time specified by the riak_object.
-%%      No last-modified time will be given for documents with siblings, if no
-%%      sibling was chosen with the vtag query param.
+%%      For documents with siblings, this is the last-modified time of the latest
+%%      sibling.
 last_modified(RD, Ctx) ->
     case select_doc(Ctx) of
         {MD, _} ->
-            {case dict:fetch(?MD_LASTMOD, MD) of
-                 Now={_,_,_} ->
-                     calendar:now_to_universal_time(Now);
-                 Rfc1123 when is_list(Rfc1123) ->
-                     httpd_util:convert_request_date(Rfc1123)
-             end,
-             RD, Ctx};
+            {normalize_last_modified(MD),RD, Ctx};
         multiple_choices ->
-            {undefined, RD, Ctx}
+            {ok, Doc} = Ctx#ctx.doc,
+            LMDates = [ normalize_last_modified(MD) ||
+                          MD <- riak_object:get_metadatas(Doc) ],
+            {lists:max(LMDates), RD, Ctx}
+    end.
+
+%% @spec normalize_last_modified(dict()) -> calendar:datetime()
+%% @doc Extract and convert the Last-Modified metadata into a normalized form
+%%      for use in the last_modified/2 callback.
+normalize_last_modified(MD) ->
+    case dict:fetch(?MD_LASTMOD, MD) of
+        Now={_,_,_} ->
+            calendar:now_to_universal_time(Now);
+        Rfc1123 when is_list(Rfc1123) ->
+            httpd_util:convert_request_date(Rfc1123)
     end.
 
 %% @spec get_link_heads(reqdata(), context()) -> [link()]
@@ -1006,12 +1034,12 @@ handle_common_error(Reason, RD, Ctx) ->
                                    [Returned, Requested]),
                     RD)),
              Ctx};
-        {error, {w_val_unsatisfied, Requested, Returned}} ->
+        {error, {w_val_unsatisfied, NumW, NumDW, W, DW}} ->
             {{halt, 503},
              wrq:set_resp_header("Content-Type", "text/plain",
                     wrq:append_to_response_body(
-                      io_lib:format("W-value unsatisfied: ~p/~p~n",
-                                    [Returned, Requested]),
+                      io_lib:format("W/DW-value unsatisfied: w=~p/~p dw=~p/~p~n",
+                                    [NumW, W, NumDW, DW]),
                       RD)),
              Ctx};
         {error, {pr_val_unsatisfied, Requested, Returned}} ->
diff --git a/src/riak_kv_wm_props.erl b/src/riak_kv_wm_props.erl
index 168f3a6297..bc173d16ed 100644
--- a/src/riak_kv_wm_props.erl
+++ b/src/riak_kv_wm_props.erl
@@ -44,6 +44,10 @@
 %%   {"props":{Prop:Val}}
 %%   Where the "props" object takes the same form as returned from
 %%   a GET of the same resource.
+%%
+%% DELETE /buckets/Bucket/props
+%%   Reset bucket properties back to the default settings;
+%%   not supported by the old (version 1) API.
 
 -module(riak_kv_wm_props).
@@ -51,6 +55,7 @@ -export([ init/1, service_available/2, + forbidden/2, allowed_methods/2, malformed_request/2, content_types_provided/2, @@ -58,7 +63,8 @@ content_types_accepted/2, produce_bucket_body/2, accept_bucket_body/2, - get_bucket_props_json/2 + get_bucket_props_json/2, + delete_resource/2 ]). %% @type context() = term() @@ -67,7 +73,8 @@ prefix, %% string() - prefix for resource uris riak, %% local | {node(), atom()} - params for riak client bucketprops, %% proplist() - properties of the bucket - method %% atom() - HTTP method for the request + method, %% atom() - HTTP method for the request + api_version %% non_neg_integer() - old or new http api }). -include_lib("webmachine/include/webmachine.hrl"). @@ -79,7 +86,8 @@ init(Props) -> {ok, #ctx{ prefix=proplists:get_value(prefix, Props), - riak=proplists:get_value(riak, Props) + riak=proplists:get_value(riak, Props), + api_version=proplists:get_value(api_version,Props) }}. %% @spec service_available(reqdata(), context()) -> @@ -108,12 +116,17 @@ service_available(RD, Ctx=#ctx{riak=RiakProps}) -> Ctx} end. +forbidden(RD, Ctx) -> + {riak_kv_wm_utils:is_forbidden(RD), RD, Ctx}. + %% @spec allowed_methods(reqdata(), context()) -> %% {[method()], reqdata(), context()} %% @doc Get the list of methods this resource supports. %% Properties allows HEAD, GET, and PUT. -allowed_methods(RD, Ctx) -> - {['HEAD', 'GET', 'PUT'], RD, Ctx}. +allowed_methods(RD, Ctx) when Ctx#ctx.api_version =:= 1 -> + {['HEAD', 'GET', 'PUT'], RD, Ctx}; +allowed_methods(RD, Ctx) when Ctx#ctx.api_version =:= 2 -> + {['HEAD', 'GET', 'PUT', 'DELETE'], RD, Ctx}. %% @spec malformed_request(reqdata(), context()) -> %% {boolean(), reqdata(), context()} @@ -204,6 +217,12 @@ accept_bucket_body(RD, Ctx=#ctx{bucket=B, client=C, bucketprops=Props}) -> {{halt, 400}, RD2, Ctx} end. +%% @spec delete_resource(reqdata(), context()) -> {boolean, reqdata(), context()} +%% @doc Reset the bucket properties back to the default values +delete_resource(RD, Ctx=#ctx{bucket=B, client=C}) -> + C:reset_bucket(B), + {true, RD, Ctx}. + %% @spec jsonify_bucket_prop({Property::atom(), erlpropvalue()}) -> %% {Property::binary(), jsonpropvalue()} %% @type erlpropvalue() = integer()|string()|boolean()| diff --git a/src/riak_kv_wm_stats.erl b/src/riak_kv_wm_stats.erl index 2bd189c18b..11540e054e 100644 --- a/src/riak_kv_wm_stats.erl +++ b/src/riak_kv_wm_stats.erl @@ -29,6 +29,7 @@ encodings_provided/2, content_types_provided/2, service_available/2, + forbidden/2, produce_body/2, pretty_print/2 ]). @@ -67,13 +68,10 @@ content_types_provided(ReqData, Context) -> service_available(ReqData, Ctx) -> - case app_helper:get_env(riak_kv, riak_kv_stat, false) of - false -> - {false, wrq:append_to_response_body("riak_kv_stat is disabled on this node.\n", ReqData), - Ctx}; - true -> - {true, ReqData, Ctx} - end. + {true, ReqData, Ctx}. + +forbidden(RD, Ctx) -> + {riak_kv_wm_utils:is_forbidden(RD), RD, Ctx}. produce_body(ReqData, Ctx) -> Body = mochijson2:encode({struct, get_stats()}), @@ -89,5 +87,3 @@ pretty_print(RD1, C1=#ctx{}) -> get_stats() -> proplists:delete(disk, riak_kv_stat:get_stats()) ++ riak_core_stat:get_stats(). - - diff --git a/src/riak_kv_wm_utils.erl b/src/riak_kv_wm_utils.erl index 9331ed7adc..c5459b43e1 100644 --- a/src/riak_kv_wm_utils.erl +++ b/src/riak_kv_wm_utils.erl @@ -35,7 +35,8 @@ encode_value/1, accept_value/2, any_to_list/1, - any_to_bool/1 + any_to_bool/1, + is_forbidden/1 ]). -include_lib("webmachine/include/webmachine.hrl"). 
@@ -233,3 +234,49 @@ any_to_bool(V) when is_integer(V) -> V /= 0; any_to_bool(V) when is_boolean(V) -> V. + +is_forbidden(RD) -> + is_null_origin(RD) or + (app_helper:get_env(riak_kv,secure_referer_check,true) and not is_valid_referer(RD)). + +%% @doc Check if the Origin header is "null". This is useful to look for attempts +%% at CSRF, but is not a complete answer to the problem. +is_null_origin(RD) -> + case wrq:get_req_header("Origin", RD) of + "null" -> + true; + _ -> + false + end. + +%% @doc Validate that the Referer matches up with scheme, host and port of the +%% machine that received the request. +is_valid_referer(RD) -> + OriginTuple = {wrq:scheme(RD), string:join(wrq:host_tokens(RD), "."), wrq:port(RD)}, + case referer_tuple(RD) of + undefined -> + true; + {invalid, Url} -> + lager:debug("WM unparsable referer: ~s\n", [Url]), + false; + OriginTuple -> + true; + RefererTuple -> + lager:debug("WM referrer not origin. Origin ~p != Referer ~p\n", [OriginTuple, RefererTuple]), + false + end. + +referer_tuple(RD) -> + case wrq:get_req_header("Referer", RD) of + undefined -> + undefined; + Url -> + case http_uri:parse(Url) of + {ok, {Scheme, _, Host, Port, _, _}} -> %R15+ + {Scheme, Host, Port}; + {Scheme, _, Host, Port, _, _} -> % R14 and below + {Scheme, Host, Port}; + {error, _} -> + {invalid, Url} + end + end. diff --git a/src/riak_kv_yessir_backend.erl b/src/riak_kv_yessir_backend.erl new file mode 100644 index 0000000000..0b5ea4e7df --- /dev/null +++ b/src/riak_kv_yessir_backend.erl @@ -0,0 +1,350 @@ +%% ------------------------------------------------------------------- +%% +%% riak_kv_yessir_backend: simulation backend for Riak +%% +%% Copyright (c) 2012 Basho Technologies, Inc. All Rights Reserved. +%% +%% This file is provided to you under the Apache License, +%% Version 2.0 (the "License"); you may not use this file +%% except in compliance with the License. You may obtain +%% a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, +%% software distributed under the License is distributed on an +%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +%% KIND, either express or implied. See the License for the +%% specific language governing permissions and limitations +%% under the License. +%% +%% ------------------------------------------------------------------- + +%% @doc riak_kv_yessir_backend is a backend for benchmarking Riak without +%% any disk I/O or RAM constraints. +%% +%% Riak: "Store this key/value pair." +%% Backend: "Yes, sir!" +%% Riak: "Get me that key/value pair." +%% Backend: "Yes, sir!" +%% +%% This backend uses zero disk resources and uses constant memory. +%% +%% * All put requests are immediately acknowledged 'ok'. No +%% data about the put request is stored. +%% * All get requests are fulfilled by creating a constant binary for +%% the value. No attempt is made to correlate get keys with +%% previously-put keys or to correlate get values with previously-put +%% values. +%% - Get operation keys that are formatted in with the convention +%% <<"yessir.{integer}.anything">> will use integer (interpreted in +%% base 10) as the returned binary's Size. +%% +%% fold_keys and fold_objects are implemented for both sync and async. +%% Each will return the same deterministic set of results for every call, +%% given the same set of configuration parameters. The size of the object +%% folded over is controlled by the "default_size" config var. 
The number +%% of keys folded over is controlled by the "key_count" config var. Folding +%% over the keys and objects will each return the same set of keys, so if +%% you fold over the keys and collect the list; and then you fold over the +%% objects and collect the list of keys again, the two lists will match. +%% +%% This backend is the Riak storage manager equivalent of: +%% +%% * cat > /dev/null +%% * cat < /dev/zero +%% +%% === Configuration Options === +%% +%% The following configuration options are available for the yessir backend. +%% The options should be specified in the `riak_kv' section of your +%% app.config file. +%% +%%
+%% <ul>
+%% <li>`yessir_default_size' - The number of bytes of generated data for the value.</li>
+%% <li>`yessir_key_count' - The number of keys that will be folded over, e.g. list_keys().</li>
+%% </ul>
+%% +%% TODO list: +%% +%% * Add configuration option for random percent of not_found replies for get +%% - Anything non-zero would trigger read-repair, which could be useful +%% for some simulations. +%% * Is there a need for simulations for get to return different vclocks? +%% * Add variable latency before responding. This callback API is +%% synchronous, but adding constant- & uniform- & pareto-distributed +%% delays would simulate disk I/O latencies because all other backend +%% APIs are also synchronous. + +-module(riak_kv_yessir_backend). +-behavior(riak_kv_backend). + +%% KV Backend API +-export([api_version/0, + capabilities/1, + capabilities/2, + start/2, + stop/1, + get/3, + put/5, + delete/4, + drop/1, + fold_buckets/4, + fold_keys/4, + fold_objects/4, + is_empty/1, + status/1, + callback/3]). + +-ifdef(TEST). +-include_lib("eunit/include/eunit.hrl"). +-endif. + +-define(API_VERSION, 1). +-define(CAPABILITIES, [async_fold]). + +-record(state, { + default_get = <<>>, + default_size = 0, + key_count = 0, + op_get = 0, + op_put = 0, + op_delete = 0 + }). +-type state() :: #state{}. +-type config() :: [{atom(), term()}]. + +%% =================================================================== +%% Public API +%% =================================================================== + +%% @doc Return the major version of the +%% current API. +-spec api_version() -> {ok, integer()}. +api_version() -> + {ok, ?API_VERSION}. + +%% @doc Return the capabilities of the backend. +-spec capabilities(state()) -> {ok, [atom()]}. +capabilities(_) -> + {ok, ?CAPABILITIES}. + +%% @doc Return the capabilities of the backend. +-spec capabilities(riak_object:bucket(), state()) -> {ok, [atom()]}. +capabilities(_, _) -> + {ok, ?CAPABILITIES}. + +%% @doc Start this backend, yes, sir! +-spec start(integer(), config()) -> {ok, state()} | {error, term()}. +start(_Partition, Config) -> + DefaultLen = case app_helper:get_prop_or_env( + yessir_default_size, Config, yessir_backend) of + undefined -> 1024; + Len -> Len + end, + KeyCount = case app_helper:get_prop_or_env( + yessir_key_count, Config, yessir_backend) of + undefined -> 1024; + Count -> Count + end, + {ok, #state{default_get = <<42:(DefaultLen*8)>>, + default_size = DefaultLen, + key_count = KeyCount}}. + +%% @doc Stop this backend, yes, sir! +-spec stop(state()) -> ok. +stop(_State) -> + ok. + +%% @doc Get a fake object, yes, sir! +-spec get(riak_object:bucket(), riak_object:key(), state()) -> + {ok, any(), state()}. +get(Bucket, Key, #state{op_get = Gets} = S) -> + Bin = case get_binsize(Key) of + undefined -> S#state.default_get; + N -> <<42:(N*8)>> + end, + Meta = dict:new(), + Meta1 = dict:store(<<"X-Riak-Last-Modified">>, erlang:now(), Meta), + Meta2 = dict:store(<<"X-Riak-VTag">>, make_vtag(erlang:now()), Meta1), + O = riak_object:increment_vclock(riak_object:new(Bucket, Key, Bin, Meta2), + <<"yessir!">>, 1), + {ok, term_to_binary(O), S#state{op_get = Gets + 1}}. + +%% @doc Store an object, yes, sir! +-type index_spec() :: {add, Index, SecondaryKey} | {remove, Index, SecondaryKey}. +-spec put(riak_object:bucket(), riak_object:key(), [index_spec()], binary(), state()) -> + {ok, state()}. +put(_Bucket, _PKey, _IndexSpecs, _Val, #state{op_put = Puts} = S) -> + {ok, S#state{op_put = Puts + 1}}. + +%% @doc Delete an object, yes, sir! +-spec delete(riak_object:bucket(), riak_object:key(), [index_spec()], state()) -> + {ok, state()}. +delete(_Bucket, _Key, _IndexSpecs, #state{op_delete = Deletes} = S) -> + {ok, S#state{op_delete = Deletes + 1}}. 
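Per the key-naming convention documented above, get/3 sizes its manufactured value from the key itself. A usage sketch (assumes `S' is a state obtained from start/2 above):

    %% A key of the form <<"yessir.N.anything">> yields an N-byte value.
    {ok, ValBin, _S1} =
        riak_kv_yessir_backend:get(<<"any_bucket">>, <<"yessir.16.k">>, S),
    Obj = binary_to_term(ValBin),
    16 = byte_size(riak_object:get_value(Obj)).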
+
+%% @doc Fold over all the buckets, yes, sir!
+-spec fold_buckets(riak_kv_backend:fold_buckets_fun(),
+                   any(),
+                   [],
+                   state()) -> {ok, any()}.
+fold_buckets(_FoldBucketsFun, Acc, _Opts, _S) ->
+    {ok, Acc}.
+
+%% @doc Fold over all the keys for one or all buckets, yes, sir!
+-spec fold_keys(riak_kv_backend:fold_keys_fun(),
+                any(),
+                [{atom(), term()}],
+                state()) -> {ok, term()}.
+fold_keys(FoldKeysFun, Accum, Opts, State) ->
+    KeyCount = State#state.key_count,
+    BucketOpt = lists:keyfind(bucket, 1, Opts),
+    Folder = case BucketOpt of
+                 {bucket, Bucket} ->
+                     FoldFun = fold_keys_fun(FoldKeysFun, Bucket),
+                     get_folder(FoldFun, Accum, KeyCount);
+                 _ ->
+                     FoldFun = fold_keys_fun(FoldKeysFun, <<"all">>),
+                     get_folder(FoldFun, Accum, KeyCount)
+             end,
+    case lists:member(async_fold, Opts) of
+        true ->
+            {async, Folder};
+        false ->
+            {ok, Folder()}
+    end.
+
+%% @doc Fold over all the objects for one or all buckets, yes, sir!
+-spec fold_objects(riak_kv_backend:fold_objects_fun(),
+                   any(),
+                   [{atom(), term()}],
+                   state()) -> {ok, any()} | {async, fun()}.
+fold_objects(FoldObjectsFun, Accum, Opts, State) ->
+    KeyCount = State#state.key_count,
+    ValueSize = State#state.default_size,
+    BucketOpt = lists:keyfind(bucket, 1, Opts),
+    Folder = case BucketOpt of
+                 {bucket, Bucket} ->
+                     FoldFun = fold_objects_fun(FoldObjectsFun, Bucket, ValueSize),
+                     get_folder(FoldFun, Accum, KeyCount);
+                 _ ->
+                     FoldFun = fold_objects_fun(FoldObjectsFun, <<"all">>, ValueSize),
+                     get_folder(FoldFun, Accum, KeyCount)
+             end,
+    case lists:member(async_fold, Opts) of
+        true ->
+            {async, Folder};
+        false ->
+            {ok, Folder()}
+    end.
+
+%% @doc Delete all objects from this backend, yes, sir!
+-spec drop(state()) -> {ok, state()}.
+drop(S) ->
+    {ok, S}.
+
+%% @doc Returns true if this yessir backend contains any
+%% non-tombstone values; otherwise returns false.
+-spec is_empty(state()) -> false.
+is_empty(_S) ->
+    false.
+
+-spec status(state()) -> [{atom(), term()}].
+status(#state{op_put = Puts, op_get = Gets, op_delete = Deletes}) ->
+    [{puts, Puts}, {gets, Gets}, {deletes, Deletes}].
+
+%% @doc Register an asynchronous callback
+-spec callback(reference(), any(), state()) -> {ok, state()}.
+callback(_Ref, _Whatever, S) ->
+    {ok, S}.
+
+
+%% ===================================================================
+%% Internal functions
+%% ===================================================================
+
+get_folder(FoldFun, Acc, KeyCount) ->
+    fun() ->
+            fold_anything_fun(FoldFun, Acc, KeyCount)
+    end.
+
+key_of_integer(Range, State) ->
+    {N, S} = random:uniform_s(Range, State),
+    Key = integer_to_list(N) ++ ".1000",   %% e.g. "10.1000"
+    BKey = list_to_binary(Key),            %% e.g. <<"10.1000">>
+    {BKey, S}.
+
+value_for_random(VR, Size) ->
+    <<VR:(Size*8)>>.
+
+fold_anything_fun(FoldFunc, Acc, KeyCount) ->
+    Range = 1000000,
+    KeyState = random:seed0(),
+    ValueState = random:seed0(),
+    all_keys_folder(FoldFunc, Acc, Range, {KeyState, ValueState}, KeyCount).
+
+all_keys_folder(FoldFunc, Acc, _Range, _S, 0) ->
+    FoldFunc(undefined, 0, Acc);
+all_keys_folder(FoldFunc, Acc, Range, {KS,VS}, N) ->
+    {Key,KSS} = key_of_integer(Range, KS),
+    {VR,VSS} = random:uniform_s(255,VS),
+    Acc1 = FoldFunc(Key, VR, Acc),
+    all_keys_folder(FoldFunc, Acc1, Range, {KSS,VSS}, N-1).
+
+%% @private
+%% Return a function to fold over keys on this backend
+fold_keys_fun(FoldKeysFun, Bucket) ->
+    fun(Key, _VR, Acc) when Key /= undefined ->
+            FoldKeysFun(Bucket, Key, Acc);
+       (_, _, Acc) ->
+            Acc
+    end.
+
+%% @private
+%% Return a function to fold over objects on this backend
+fold_objects_fun(FoldObjectsFun, Bucket, Size) ->
+    fun(Key, VR, Acc) when Key /= undefined ->
+            Bin = value_for_random(VR, Size),
+            Meta = dict:new(),
+            Meta1 = dict:store(<<"X-Riak-Last-Modified">>, erlang:now(), Meta),
+            Meta2 = dict:store(<<"X-Riak-VTag">>, make_vtag(erlang:now()), Meta1),
+            O = riak_object:increment_vclock(riak_object:new(Bucket, Key, Bin, Meta2),
+                                             <<"yessir!">>, 1),
+            FoldObjectsFun(Bucket, Key, term_to_binary(O), Acc);
+       (_, _, Acc) ->
+            Acc
+    end.
+
+%% borrowed from kv get_fsm...
+make_vtag(Now) ->
+    <<HashAsNum:128/integer>> = crypto:md5(term_to_binary({node(), Now})),
+    riak_core_util:integer_to_list(HashAsNum,62).
+
+get_binsize(<<"yessir.", Rest/binary>>) ->
+    get_binsize(Rest, 0);
+get_binsize(_) ->
+    undefined.
+
+get_binsize(<<X:8, Rest/binary>>, Val) when $0 =< X, X =< $9 ->
+    get_binsize(Rest, (Val * 10) + (X - $0));
+get_binsize(_, Val) ->
+    Val.
+
+%%
+%% Test
+%%
+-ifdef(USE_BROKEN_TESTS).
+-ifdef(TEST).
+simple_test() ->
+    Config = [],
+    riak_kv_backend:standard_test(?MODULE, Config).
+
+-ifdef(EQC).
+eqc_test() ->
+    Cleanup = fun(_State,_Olds) -> ok end,
+    Config = [],
+    ?assertEqual(true, backend_eqc:test(?MODULE, false, Config, Cleanup)).
+-endif. % EQC
+-endif. % TEST
+-endif. % USE_BROKEN_TESTS
diff --git a/test/backend_eqc.erl b/test/backend_eqc.erl
index ae3594cc83..6f9099cac4 100644
--- a/test/backend_eqc.erl
+++ b/test/backend_eqc.erl
@@ -40,6 +40,7 @@
 -export([initial_state/0,
          initial_state_data/0,
          next_state_data/5,
+         dynamic_precondition/4,
          precondition/4,
          postcondition/5]).
 
@@ -52,16 +53,18 @@
 
 %% Helpers
 -export([drop/2,
+         delete/5,
          init_backend/3]).
 
--define(TEST_ITERATIONS, 50).
+-define(TEST_ITERATIONS, 250).
 
 -record(qcst, {backend, % Backend module under test
                volatile, % Indicates if backend is volatile
                c, % Backend config
                s, % Module state returned by Backend:start
                olds=sets:new(), % Old states after a stop
-               d=[]}).% Orddict of values stored
+               d=[], % Orddict of values stored
+               i=ordsets:new()}). % List of indexes
 
 %% ====================================================================
 %% Public API
@@ -74,7 +77,8 @@ test(Backend, Volatile) ->
     test(Backend, Volatile, []).
 
 test(Backend, Volatile, Config) ->
-    test(Backend, Volatile, Config, fun(_BeState,_Olds) -> ok end).
+    test(Backend, Volatile, Config, fun(BeState,_Olds) ->
+                                            catch(Backend:stop(BeState)) end).
 
 test(Backend, Volatile, Config, Cleanup) ->
     test(Backend, Volatile, Config, Cleanup, ?TEST_ITERATIONS).
@@ -103,10 +107,11 @@ prop_backend(Backend, Volatile, Config, Cleanup) ->
                                            command_names(Cmds))]),
                       ?debugFmt("Result: ~p~n", [Res]),
                       ?debugFmt("History: ~p~n", [H]),
-                      ?debugFmt("BE Config: ~p~nBE State: ~p~nD: ~p~n",
+                      ?debugFmt("BE Config: ~p~nBE State: ~p~nD: ~p~nI: ~p~n",
                                 [S#qcst.c,
                                  S#qcst.s,
-                                 orddict:to_list(S#qcst.d)])
+                                 orddict:to_list(S#qcst.d),
+                                 ordsets:to_list(S#qcst.i)])
                   end,
                   equals(ok, Res)))
         end
@@ -130,11 +135,64 @@ val() ->
     %% differ since at this point in the processing
     %% pipeline the information has already been
     %% extracted.
-    term_to_binary(riak_object:new(<<"b1">>, <<"k1">>, binary())).
+    term_to_binary(riak_object:new(<<"b1">>, <<"k1">>, <<"v1">>)).
 
 g_opts() ->
     frequency([{5, [async_fold]}, {2, []}]).
 
+fold_keys_opts() ->
+    frequency([{5, [async_fold]}, {2, []}, {2, [{index, bucket(), index_query()}]}, {2, [{bucket, bucket()}]}]).
+
+index_specs() ->
+    ?LET(L, list(index_spec()), lists:usort(L)).
+
+index_spec() ->
+    oneof([
+           {add, bin_index(), bin_posting()},
+           {remove, bin_index(), bin_posting()},
+           {add, int_index(), int_posting()},
+           {remove, int_index(), int_posting()}
+          ]).
+
+index_query() ->
+    oneof([
+           {eq, <<"$bucket">>, bucket()}, %% the bucket() in this query is ignored/transformed
+           range_query(<<"$key">>, key(), key()),
+           {eq, <<"$key">>, key()},
+           eq_query(),
+           range_query()
+          ]).
+
+eq_query() ->
+    oneof([
+           {eq, bin_index(), bin_posting()},
+           {eq, int_index(), int_posting()}
+          ]).
+
+range_query() ->
+    oneof([
+           range_query(bin_index(), bin_posting(), bin_posting()),
+           range_query(int_index(), int_posting(), int_posting())
+          ]).
+
+range_query(Idx, Min0, Max0) ->
+    ?LET({Min, Max}, {Min0, Max0},
+         if Min > Max ->
+                 {range, Idx, Max, Min};
+            true ->
+                 {range, Idx, Min, Max}
+         end).
+
+bin_index() -> elements([<<"x_bin">>, <<"y_bin">>, <<"z_bin">>]).
+bin_posting() ->
+    elements([ <<C>> || C <- lists:seq($a, $z) ]).
+
+int_index() -> elements([<<"i_int">>, <<"j_int">>, <<"k_int">>]).
+int_posting() ->
+    int().
+
+
+
 %%====================================================================
 %% Helpers
 %%====================================================================
@@ -192,11 +250,23 @@ init_backend(Backend, _Volatile, Config) ->
     S.
 
 drop(Backend, State) ->
-    case Backend:drop(State) of
+    State1 = case Backend:drop(State) of
+                 {ok, NewState} ->
+                     NewState;
+                 {error, _, NewState} ->
+                     NewState
+             end,
+    Backend:stop(State1).
+
+delete(Bucket, Key, Backend, BackendState, Indexes) ->
+    IndexSpecs = [{remove, Idx, SKey} || {B,Idx,SKey,K} <- Indexes,
+                                         B == Bucket,
+                                         K == Key],
+    case Backend:delete(Bucket, Key, IndexSpecs, BackendState) of
         {ok, NewState} ->
-            NewState;
-        {error, _, NewState} ->
-            NewState
+            {ok, NewState};
+        {error, Reason, _NewState} ->
+            {error, Reason}
     end.
 
 get_fold_buffer() ->
@@ -231,6 +301,27 @@ receive_fold_results(Acc) ->
             receive_fold_results(Acc++Results)
     end.
 
+-type index_list() :: ordsets:ordset({riak_object:bucket(), binary(), binary() | integer(), riak_object:key()}).
+-spec update_indexes(Bucket, Key, IndexSpecs, Indexes) -> Indexes1 when
+      Bucket :: riak_object:bucket(),
+      Key :: riak_object:key(),
+      IndexSpecs :: [ {add, binary(), binary() | integer()} | {remove, binary(), binary() | integer()} ],
+      Indexes :: index_list(),
+      Indexes1 :: index_list().
+update_indexes(_Bucket, _Key, [], Indexes) ->
+    Indexes;
+update_indexes(Bucket, Key, [{add, Index, SKey}|T], Indexes) ->
+    PostingKey = {Bucket, Index, SKey, Key},
+    update_indexes(Bucket, Key, T, ordsets:add_element(PostingKey, Indexes));
+update_indexes(Bucket, Key, [{remove, Index, SKey}|T], Indexes) ->
+    PostingKey = {Bucket, Index, SKey, Key},
+    update_indexes(Bucket, Key, T, ordsets:del_element(PostingKey,Indexes)).
+
+remove_indexes(Bucket, Key, Indexes) ->
+    ordsets:filter(fun({B,_,_,K}) ->
+                           B /= Bucket orelse K /= Key
+                   end, Indexes).
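To make the model bookkeeping concrete, this is how update_indexes/4 and remove_indexes/3 above are expected to behave on the ordset of postings; a hedged example with arbitrary bucket, key, and posting values, not part of the suite:

    I1 = update_indexes(<<"b">>, <<"k">>,
                        [{add, <<"i_int">>, 5}, {add, <<"x_bin">>, <<"a">>}],
                        ordsets:new()),
    [{<<"b">>, <<"i_int">>, 5, <<"k">>},
     {<<"b">>, <<"x_bin">>, <<"a">>, <<"k">>}] = ordsets:to_list(I1),
    [] = ordsets:to_list(remove_indexes(<<"b">>, <<"k">>, I1)).

Keeping the model as a flat ordset of {Bucket, Index, SecondaryKey, Key} postings lets the index-fold postconditions below be expressed as simple list comprehensions over the set.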
+ %%==================================================================== %% eqc_fsm callbacks %%==================================================================== @@ -250,15 +341,21 @@ initial_state_data(Backend, Volatile, Config) -> next_state_data(running, stopped, S, _R, {call, _M, stop, _}) -> S#qcst{d=orddict:new(), - olds = sets:add_element(S#qcst.s, S#qcst.olds)}; + olds = sets:add_element(S#qcst.s, S#qcst.olds), + i= ordsets:new()}; next_state_data(stopped, running, S, R, {call, _M, init_backend, _}) -> S#qcst{s=R}; -next_state_data(_From, _To, S, _R, {call, _M, put, [Bucket, Key, [], Val, _]}) -> - S#qcst{d = orddict:store({Bucket, Key}, Val, S#qcst.d)}; -next_state_data(_From, _To, S, _R, {call, _M, delete, [Bucket, Key, [], _]}) -> - S#qcst{d = orddict:erase({Bucket, Key}, S#qcst.d)}; -next_state_data(_From, _To, S, R, {call, ?MODULE, drop, _}) -> - S#qcst{d=orddict:new(), s=R}; +next_state_data(_From, _To, S, _R, {call, _M, put, [Bucket, Key, IndexSpecs, Val, _]}) -> + S#qcst{d = orddict:store({Bucket, Key}, Val, S#qcst.d), + i = update_indexes(Bucket, Key, IndexSpecs, S#qcst.i)}; +next_state_data(_From, _To, S, _R, {call, _M, delete, [Bucket, Key|_]}) -> + S#qcst{d = orddict:erase({Bucket, Key}, S#qcst.d), + i = remove_indexes(Bucket, Key, S#qcst.i)}; +next_state_data(_From, _To, S, _R, {call, ?MODULE, drop, _}) -> + + S#qcst{d=orddict:new(), + s=undefined, + i=ordsets:new()}; next_state_data(_From, _To, S, _R, _C) -> S. @@ -269,19 +366,26 @@ stopped(#qcst{backend=Backend, {call, ?MODULE, init_backend, [Backend, Volatile, Config]}}]. running(#qcst{backend=Backend, - s=State}) -> + s=State, + i=Indexes}) -> [ - {history, {call, Backend, put, [bucket(), key(), [], val(), State]}}, + {history, {call, Backend, put, [bucket(), key(), index_specs(), val(), State]}}, {history, {call, Backend, get, [bucket(), key(), State]}}, - {history, {call, Backend, delete, [bucket(), key(), [], State]}}, + {history, {call, ?MODULE, delete, [bucket(), key(), Backend, State, Indexes]}}, {history, {call, Backend, fold_buckets, [fold_buckets_fun(), get_fold_buffer(), g_opts(), State]}}, - {history, {call, Backend, fold_keys, [fold_keys_fun(), get_fold_buffer(), g_opts(), State]}}, + {history, {call, Backend, fold_keys, [fold_keys_fun(), get_fold_buffer(), fold_keys_opts(), State]}}, {history, {call, Backend, fold_objects, [fold_objects_fun(), get_fold_buffer(), g_opts(), State]}}, {history, {call, Backend, is_empty, [State]}}, - {history, {call, ?MODULE, drop, [Backend, State]}}, + {stopped, {call, ?MODULE, drop, [Backend, State]}}, {stopped, {call, Backend, stop, [State]}} ]. +dynamic_precondition(_From,_To,#qcst{backend=Backend},{call, _M, fold_keys, [_FoldFun, _Acc, [{index, Bucket, _}], BeState]}) -> + {ok, Capabilities} = Backend:capabilities(Bucket, BeState), + lists:member(indexes, Capabilities); +dynamic_precondition(_From,_To,_S,_C) -> + true. + precondition(_From,_To,_S,_C) -> true. 
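Unlike precondition/4, which sees only the symbolic model state, dynamic_precondition/4 is evaluated at command-execution time against the live state, so it can ask the started backend whether it actually supports secondary indexes; index-fold commands are skipped for backends that do not advertise the capability. The check pulled out in isolation (a sketch, with illustrative names):

    supports_index_folds(Backend, Bucket, BeState) ->
        {ok, Caps} = Backend:capabilities(Bucket, BeState),
        lists:member(indexes, Caps).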
@@ -297,7 +401,7 @@ postcondition(_From, _To, _S, {call, _M, put, [_Bucket, _Key, _IndexEntries, _Val, _BeState]}, {R, _RState}) -> R =:= ok orelse R =:= already_exists; postcondition(_From, _To, _S, - {call, _M, delete,[_Bucket, _Key, _IndexEntries, _BeState]}, {R, _RState}) -> + {call, _M, delete, _}, {R, _RState}) -> R =:= ok; postcondition(_From, _To, S, {call, _M, fold_buckets, [_FoldFun, _Acc, _Opts, _BeState]}, FoldRes) -> @@ -314,6 +418,101 @@ postcondition(_From, _To, S, end, R = receive_fold_results([]), lists:usort(Buckets) =:= lists:sort(R); +postcondition(_From, _To, S, + {call, _M, fold_keys, [_FoldFun, _Acc, [{index, Bucket,{eq, <<"$bucket">>, _}}], _BeState]}, FoldRes) -> + ExpectedEntries = orddict:to_list(S#qcst.d), + Keys = [{B, Key} || {{B, Key}, _} <- ExpectedEntries, B == Bucket], + From = {raw, foldid, self()}, + case FoldRes of + {async, Work} -> + Pool = erlang:get(worker_pool), + FinishFun = finish_fun(From), + riak_core_vnode_worker_pool:handle_work(Pool, {fold, Work, FinishFun}, From); + {ok, Buffer} -> + finish_fold(Buffer, From) + end, + R = receive_fold_results([]), + lists:sort(Keys) =:= lists:sort(R); +postcondition(From, To, S, {call, _M, fold_keys, [FoldFun, Acc, [{index, Bucket, {eq, <<"$key">>, Val}}], BeState]}, FoldRes) -> + %% Equality query on $key should be the same as a range with equal endpoints. + postcondition(From, To, S, {call, _M, fold_keys, [FoldFun, Acc, [{index, Bucket, {range, <<"$key">>, Val, Val}}], BeState]}, FoldRes); +postcondition(_From, _To, S, + {call, _M, fold_keys, [_FoldFun, _Acc, [{index, Bucket,{range, <<"$key">>, Min, Max}}], _BeState]}, FoldRes) -> + ExpectedEntries = orddict:to_list(S#qcst.d), + Keys = [{B, Key} || {{B, Key}, _} <- ExpectedEntries, + B == Bucket, Key =< Max, Key >= Min], + From = {raw, foldid, self()}, + case FoldRes of + {async, Work} -> + Pool = erlang:get(worker_pool), + FinishFun = finish_fun(From), + riak_core_vnode_worker_pool:handle_work(Pool, {fold, Work, FinishFun}, From); + {ok, Buffer} -> + finish_fold(Buffer, From) + end, + R = receive_fold_results([]), + case lists:sort(Keys) =:= lists:sort(R) of + true -> true; + _ -> + [{expected, Keys},{received, R}] + end; +postcondition(_From, _To, S, + {call, _M, fold_keys, [_FoldFun, _Acc, [{index, Bucket, {eq, Idx, SKey}}], _BeState]}, FoldRes) -> + Keys = [ {B,K} || {B, I, V, K} <- ordsets:to_list(S#qcst.i), + B == Bucket, I == Idx, V == SKey], + From = {raw, foldid, self()}, + case FoldRes of + {async, Work} -> + Pool = erlang:get(worker_pool), + FinishFun = finish_fun(From), + riak_core_vnode_worker_pool:handle_work(Pool, {fold, Work, FinishFun}, From); + {ok, Buffer} -> + finish_fold(Buffer, From) + end, + R = receive_fold_results([]), + case lists:sort(Keys) =:= lists:sort(R) of + true -> true; + _ -> + [{expected, Keys},{received, R}] + end; +postcondition(_From, _To, S, + {call, _M, fold_keys, [_FoldFun, _Acc, [{index, Bucket, {range, Idx, Min, Max}}], _BeState]}, FoldRes) -> + Keys = [ {B, K} || {B, I, V, K} <- ordsets:to_list(S#qcst.i), + B == Bucket, I == Idx, V =< Max, V >= Min ], + From = {raw, foldid, self()}, + case FoldRes of + {async, Work} -> + Pool = erlang:get(worker_pool), + FinishFun = finish_fun(From), + riak_core_vnode_worker_pool:handle_work(Pool, {fold, Work, FinishFun}, From); + {ok, Buffer} -> + finish_fold(Buffer, From) + end, + R = receive_fold_results([]), + case lists:sort(Keys) =:= lists:sort(R) of + true -> true; + _ -> + [{expected, Keys},{received, R}] + end; +postcondition(_From, _To, S, + {call, _M, fold_keys, 
[_FoldFun, _Acc, [{bucket, B}], _BeState]}, FoldRes) -> + ExpectedEntries = orddict:to_list(S#qcst.d), + Keys = [{Bucket, Key} || {{Bucket, Key}, _} <- ExpectedEntries, Bucket == B], + From = {raw, foldid, self()}, + case FoldRes of + {async, Work} -> + Pool = erlang:get(worker_pool), + FinishFun = finish_fun(From), + riak_core_vnode_worker_pool:handle_work(Pool, {fold, Work, FinishFun}, From); + {ok, Buffer} -> + finish_fold(Buffer, From) + end, + R = receive_fold_results([]), + case lists:sort(Keys) =:= lists:sort(R) of + true -> true; + _ -> + [{expected, Keys},{received, R}] + end; postcondition(_From, _To, S, {call, _M, fold_keys, [_FoldFun, _Acc, _Opts, _BeState]}, FoldRes) -> ExpectedEntries = orddict:to_list(S#qcst.d), @@ -328,7 +527,11 @@ postcondition(_From, _To, S, finish_fold(Buffer, From) end, R = receive_fold_results([]), - lists:sort(Keys) =:= lists:sort(R); + case lists:sort(Keys) =:= lists:sort(R) of + true -> true; + _ -> + [{expected, Keys},{received, R}] + end; postcondition(_From, _To, S, {call, _M, fold_objects, [_FoldFun, _Acc, _Opts, _BeState]}, FoldRes) -> ExpectedEntries = orddict:to_list(S#qcst.d), @@ -350,4 +553,3 @@ postcondition(_From, _To, _S, _C, _R) -> true. -endif. - diff --git a/test/fsm_eqc_util.erl b/test/fsm_eqc_util.erl index 4bd4744665..509e09f064 100644 --- a/test/fsm_eqc_util.erl +++ b/test/fsm_eqc_util.erl @@ -182,6 +182,9 @@ start_mock_servers() -> {ok, _Pid3} = fsm_eqc_vnode:start_link(), application:load(riak_core), application:start(crypto), + application:start(folsom), + riak_core_stat_cache:start_link(), + riak_kv_stat:register_stats(), riak_core_ring_events:start_link(), riak_core_node_watcher_events:start_link(), riak_core_node_watcher:start_link(), @@ -189,6 +192,7 @@ start_mock_servers() -> ok. cleanup_mock_servers() -> + application:stop(folsom), application:stop(riak_core). make_options([], Options) -> diff --git a/test/get_fsm_qc.erl b/test/get_fsm_qc.erl index b0072fc9cb..df6ad7fed9 100644 --- a/test/get_fsm_qc.erl +++ b/test/get_fsm_qc.erl @@ -33,14 +33,14 @@ num_oks = 0, del_oks = 0, num_errs = 0}). - + %%==================================================================== -%% eunit test +%% eunit test %%==================================================================== eqc_test_() -> - {spawn, + {spawn, [{setup, fun setup/0, fun cleanup/1, @@ -73,12 +73,12 @@ coverage_test() -> riak_kv_test_util:call_unused_fsm_funs(riak_kv_get_fsm). %%==================================================================== -%% Shell helpers +%% Shell helpers %%==================================================================== prepare() -> fsm_eqc_util:start_mock_servers(). - + test() -> test(100). @@ -148,17 +148,17 @@ r_seed() -> detail() -> frequency([{1, timing}, {1, vnodes}, {1, not_a_detail}]). - + details() -> frequency([{10, true}, %% All details requested {10, list(detail())}, { 1, false}]). - + bool_prop(Name) -> frequency([{4, {Name, true}}, {1, Name}, {5, {Name, false}}]). 
-option() -> +option() -> frequency([{1, {details, details()}}, {1, bool_prop(notfound_ok)}, {1, bool_prop(deletedvclock)}, @@ -186,9 +186,9 @@ prop_basic_get() -> application:set_env(riak_core, default_bucket_props, BucketProps), - + [{_,Object}|_] = Objects, - + Options = fsm_eqc_util:make_options([{r, R}, {pr, PR}], [{timeout, 200} | Options0]), {ok, GetPid} = riak_kv_get_fsm:test_link({raw, ReqId, self()}, @@ -244,9 +244,9 @@ prop_basic_get() -> PerfectPreflist = lists:all(fun({{_Idx,_Node},primary}) -> true; ({{_Idx,_Node},fallback}) -> false end, PL2), - - {RetResult, RetInfo} = case Res of + + {RetResult, RetInfo} = case Res of timeout -> {Res, undefined}; {ok, _RetObj} -> @@ -257,7 +257,7 @@ prop_basic_get() -> {{ok, RetObj}, Info0}; {error, Reason, Info0} -> {{error, Reason}, Info0} - end, + end, ?WHENFAIL( begin io:format("Res: ~p\n", [Res]), @@ -287,7 +287,7 @@ prop_basic_get() -> make_preflist2([], _Index, PL2) -> lists:reverse(PL2); make_preflist2([{_PartVal, PrimaryFallback} | Rest], Index, PL2) -> - make_preflist2(Rest, Index + 1, + make_preflist2(Rest, Index + 1, [{{Index, whereis(fsm_eqc_vnode)}, PrimaryFallback} | PL2]). %% Make responses @@ -295,7 +295,7 @@ make_partvals([], PartVals) -> lists:reverse(PartVals); make_partvals([{PartVal, _PrimaryFallback} | Rest], PartVals) -> make_partvals(Rest, [PartVal | PartVals]). - + %% Work out R given a seed. %% Generate a value from 0..N+1 @@ -363,7 +363,9 @@ check_info([], _State) -> true; check_info([{not_a_detail, unknown_detail} | Rest], State) -> check_info(Rest, State); -check_info([{duration, _} | Rest], State) -> +check_info([{response_usecs, _} | Rest], State) -> + check_info(Rest, State); +check_info([{stages, _} | Rest], State) -> check_info(Rest, State); check_info([{vnode_oks, VnodeOks} | Rest], State = #state{num_oks = NumOks}) -> %% How many Ok's in first RealR responses received by FSM. @@ -403,8 +405,8 @@ check_delete(Objects, RepairH, H, PerfectPreflist) -> %% and a perfect preflist and the object is deleted RetLins = [Lineage || {_Idx, {ok, Lineage}} <- H], URetLins = lists:usort(RetLins), - Expected = case PerfectPreflist andalso - length(RetLins) == length(H) andalso + Expected = case PerfectPreflist andalso + length(RetLins) == length(H) andalso length(URetLins) == 1 andalso riak_kv_util:is_x_deleted(proplists:get_value(hd(URetLins), Objects)) of true -> @@ -418,7 +420,7 @@ check_delete(Objects, RepairH, H, PerfectPreflist) -> all_distinct(Xs) -> equals(lists:sort(Xs),lists:usort(Xs)). - + build_merged_object([], _Objects) -> undefined; build_merged_object(Heads, Objects) -> @@ -504,6 +506,6 @@ expect(H, State = #state{n = N, real_r = R, deleted = Deleted, notfound_is_ok = end end. - - + + -endif. % EQC diff --git a/test/keys_fsm_eqc.erl b/test/keys_fsm_eqc.erl index 41ef7d6036..a8229d7491 100644 --- a/test/keys_fsm_eqc.erl +++ b/test/keys_fsm_eqc.erl @@ -42,8 +42,8 @@ eqc_test_() -> {spawn, [{setup, - fun setup/0, - fun cleanup/1, + riak_kv_test_util:common_setup(?MODULE, fun configure/1), + riak_kv_test_util:common_cleanup(?MODULE, fun configure/1), [%% Run the quickcheck tests {timeout, 60000, % timeout is in msec ?_assertEqual(true, quickcheck(numtests(?TEST_ITERATIONS, ?QC_OUT(prop_basic_listkeys()))))} @@ -52,43 +52,6 @@ eqc_test_() -> ] }. -setup() -> - %% Shut logging up - too noisy. 
- application:load(sasl), - application:set_env(sasl, sasl_error_logger, {file, "keys_fsm_eqc_sasl.log"}), - error_logger:tty(false), - error_logger:logfile({open, "keys_fsm_eqc.log"}), - - %% Cleanup in case a previous test did not - cleanup(setup), - %% Pause the make sure everything is cleaned up - timer:sleep(2000), - - %% Start erlang node - TestNode = list_to_atom("testnode" ++ integer_to_list(element(3, now()))), - net_kernel:start([TestNode, shortnames]), - do_dep_apps(start, dep_apps()), - - %% Create a fresh ring for the test - riak_core_ring_manager:ring_trans( - fun(R0, _Args) -> - R1 = riak_core_ring:add_member(node(), R0, node()), - R2 = lists:foldl(fun({I, _OldNode}, RAcc) -> - riak_core_ring:transfer_node(I, node(), RAcc) - end, R1, riak_core_ring:all_owners(R0)), - {new_ring, R2} - end, undefined), - ok. - -cleanup(_) -> - do_dep_apps(stop, lists:reverse(dep_apps())), - catch exit(whereis(riak_kv_vnode_master), kill), %% Leaks occasionally - catch exit(whereis(riak_sysmon_filter), kill), %% Leaks occasionally - net_kernel:stop(), - %% Reset the riak_core vnode_modules - application:set_env(riak_core, vnode_modules, []), - ok. - %% Call unused callback functions to clear them in the coverage %% checker so the real code stands out. coverage_test() -> @@ -99,71 +62,62 @@ coverage_test() -> %% ==================================================================== prop_basic_listkeys() -> - ?FORALL({ReqId, Bucket, KeyFilter, NVal, ObjectCount, Timeout, ClientType}, - {g_reqid(), g_bucket(), g_key_filter(), g_n_val(), g_object_count(), g_timeout(), g_client_type()}, - ?TRAPEXIT( - begin - {ok, Client} = riak:local_client(), - {ok, Buckets} = Client:list_buckets(), - case lists:member(Bucket, Buckets) of - true -> % bucket has already been used - %% Delete the existing keys in the bucket - {ok, OldKeys} = Client:list_keys(Bucket), - [Client:delete(Bucket, OldKey) || OldKey <- OldKeys]; - false -> - ok - end, - %% Set bucket properties - BucketProps = riak_core_bucket:get_bucket(Bucket), - NewBucketProps = orddict:store(n_val, NVal, BucketProps), - riak_core_bucket:set_bucket(Bucket, NewBucketProps), - %% Create objects in bucket - GeneratedKeys = [list_to_binary(integer_to_list(X)) || X <- lists:seq(1, ObjectCount)], - [ok = Client:put(riak_object:new(Bucket, Key, <<"val">>)) || Key <- GeneratedKeys], - - %% Set the expected output based on if a - %% key filter is being used or not. - case KeyFilter of - none -> - ExpectedKeys = GeneratedKeys; - _ -> - ExpectedKeyFilter = - fun(K, Acc) -> - case KeyFilter(K) of - true -> - [K | Acc]; - false -> - Acc - end - end, - ExpectedKeys = lists:foldl(ExpectedKeyFilter, [], GeneratedKeys) - end, - %% Call start_link - Keys = start_link(ReqId, Bucket, KeyFilter, Timeout, ClientType), - ?WHENFAIL( - begin - io:format("Bucket: ~p n_val: ~p ObjectCount: ~p KeyFilter: ~p~n", [Bucket, NVal, ObjectCount, KeyFilter]), - io:format("Expected Key Count: ~p Actual Key Count: ~p~n", - [length(ExpectedKeys), length(Keys)]), - io:format("Expected Keys: ~p~nActual Keys: ~p~n", - [ExpectedKeys, lists:sort(Keys)]) - end, - conjunction( - [ - {results, equals(lists:sort(Keys), lists:sort(ExpectedKeys))} - ])) - - end - )). 
+ ?FORALL({ReqId, Bucket, KeyFilter, NVal, ObjectCount, Timeout}, + {g_reqid(), g_bucket(), g_key_filter(), g_n_val(), g_object_count(), g_timeout()}, + ?TRAPEXIT( + begin + riak_kv_memory_backend:reset(), + {ok, Client} = riak:local_client(), + BucketProps = riak_core_bucket:get_bucket(Bucket), + NewBucketProps = orddict:store(n_val, NVal, BucketProps), + riak_core_bucket:set_bucket(Bucket, NewBucketProps), + %% Create objects in bucket + GeneratedKeys = [list_to_binary(integer_to_list(X)) || X <- lists:seq(1, ObjectCount)], + [ok = Client:put(riak_object:new(Bucket, Key, <<"val">>)) || Key <- GeneratedKeys], + + %% Set the expected output based on if a + %% key filter is being used or not. + case KeyFilter of + none -> + ExpectedKeys = GeneratedKeys; + _ -> + ExpectedKeyFilter = + fun(K, Acc) -> + case KeyFilter(K) of + true -> + [K | Acc]; + false -> + Acc + end + end, + ExpectedKeys = lists:foldl(ExpectedKeyFilter, [], GeneratedKeys) + end, + %% Call start_link + Keys = start_link(ReqId, Bucket, KeyFilter, Timeout), + ?WHENFAIL( + begin + io:format("Bucket: ~p n_val: ~p ObjectCount: ~p KeyFilter: ~p~n", [Bucket, NVal, ObjectCount, KeyFilter]), + io:format("Expected Key Count: ~p Actual Key Count: ~p~n", + [length(ExpectedKeys), length(Keys)]), + io:format("Expected Keys: ~p~nActual Keys: ~p~n", + [ExpectedKeys, lists:sort(Keys)]) + end, + conjunction( + [ + {results, equals(lists:sort(Keys), lists:sort(ExpectedKeys))} + ])) + + end + )). %%==================================================================== %% Wrappers %%==================================================================== -start_link(ReqId, Bucket, Filter, Timeout, ClientType) -> +start_link(ReqId, Bucket, Filter, Timeout) -> Sink = spawn(?MODULE, data_sink, [ReqId, [], false]), From = {raw, ReqId, Sink}, - {ok, _FsmPid} = riak_core_coverage_fsm:start_link(riak_kv_keys_fsm, From, [Bucket, Filter, Timeout, ClientType]), + {ok, _FsmPid} = riak_core_coverage_fsm:start_link(riak_kv_keys_fsm, From, [Bucket, Filter, Timeout]), wait_for_replies(Sink, ReqId). %%==================================================================== @@ -185,10 +139,6 @@ g_key_filter() -> end, frequency([{5, none}, {2, KeyFilter}]). -g_client_type() -> - %% TODO: Incorporate mapred type - plain. - g_n_val() -> choose(1,5). @@ -205,15 +155,12 @@ g_timeout() -> %% Helpers %%==================================================================== -prepare() -> - application:load(sasl), - error_logger:delete_report_handler(sasl_report_tty_h), - error_logger:delete_report_handler(error_logger_tty_h), - - TestNode = list_to_atom("testnode" ++ integer_to_list(element(3, now())) ++ - "@localhost"), - {ok, _} = net_kernel:start([TestNode, longnames]), - do_dep_apps(start, dep_apps()), +configure(load) -> + application:set_env(riak_kv, storage_backend, riak_kv_memory_backend), + application:set_env(riak_kv, test, true), + application:set_env(riak_kv, vnode_vclocks, true), + application:set_env(riak_kv, delete_mode, immediate); +configure(_) -> ok. test() -> @@ -225,34 +172,11 @@ test(N) -> check() -> check(prop_basic_listkeys(), current_counterexample()). -dep_apps() -> - SetupFun = - fun(start) -> - %% Set some missing env vars that are normally - %% part of release packaging. 
- application:set_env(riak_core, ring_creation_size, 64), - application:set_env(riak_kv, storage_backend, riak_kv_memory_backend), - application:set_env(riak_kv, vnode_vclocks, true), - application:set_env(riak_kv, delete_mode, immediate), - application:set_env(riak_kv, legacy_keylisting, false), - - %% Start riak_kv - timer:sleep(500); - (stop) -> - ok - end, - XX = fun(_) -> error_logger:info_msg("Registered: ~w\n", [lists:sort(registered())]) end, - [sasl, crypto, riak_sysmon, webmachine, XX, os_mon, - lager, riak_core, XX, luke, erlang_js, - inets, mochiweb, riak_pipe, SetupFun, riak_kv, SetupFun]. - -do_dep_apps(StartStop, Apps) -> - lists:map(fun(A) when is_atom(A) -> application:StartStop(A); - (F) -> F(StartStop) - end, Apps). - data_sink(ReqId, KeyList, Done) -> receive + {ReqId, From={_Pid,_Ref}, {keys, Keys}} -> + riak_kv_keys_fsm:ack_keys(From), + data_sink(ReqId, KeyList++Keys, false); {ReqId, {keys, Keys}} -> data_sink(ReqId, KeyList++Keys, false); {ReqId, done} -> @@ -289,5 +213,4 @@ wait_for_replies(Sink, ReqId) -> ?debugFmt("Received keys for older run: ~p~n", [ORef]) end. - -endif. % EQC - +-endif. % EQC diff --git a/test/mapred_test.erl b/test/mapred_test.erl index 861cf2e7eb..27a4a7c87b 100644 --- a/test/mapred_test.erl +++ b/test/mapred_test.erl @@ -21,128 +21,34 @@ -module(mapred_test). -include_lib("eunit/include/eunit.hrl"). - +-include_lib("riak_pipe/include/riak_pipe.hrl"). -compile(export_all). -dep_apps() -> - DelMe = "./EUnit-SASL.log", - DataDir = "./EUnit-datadir", - os:cmd("rm -rf " ++ DataDir), - os:cmd("mkdir " ++ DataDir), - KillDamnFilterProc = fun() -> - catch exit(whereis(riak_sysmon_filter), kill), - wait_until_dead(whereis(riak_sysmon_filter)) - end, - Core_Settings = [{handoff_ip, "0.0.0.0"}, - {handoff_port, 9183}, - {ring_creation_size, 16}, - {ring_state_dir, DataDir}], - KV_Settings = [{storage_backend, riak_kv_memory_backend}, - {vnode_vclocks, true}, - {pb_ip, "0.0.0.0"}, - {pb_port, 48087}, % arbitrary # - {map_js_vm_count, 4}, - {reduce_js_vm_count, 3}], - [ - fun(start) -> - net_kernel:start([mapred_test@localhost, shortnames]), - timer:sleep(50), - _ = application:stop(sasl), - _ = application:load(sasl), - put(old_sasl_l, app_helper:get_env(sasl, sasl_error_logger)), - ok = application:set_env(sasl, sasl_error_logger, {file, DelMe}), - ok = application:start(sasl), - %%error_logger:tty(false); - error_logger:tty(true); - (stop) -> - ok = application:stop(sasl), - ok = application:set_env(sasl, sasl_error_logger, erase(old_sasl_l)); - (fullstop) -> - _ = application:stop(sasl) - end, - %% public_key and ssl are not needed here but started by others so - %% stop them when we're done. - crypto, public_key, ssl, - fun(start) -> - ok = application:start(riak_sysmon); - (stop) -> - ok = application:stop(riak_sysmon), - KillDamnFilterProc(); - (fullstop) -> - _ = application:stop(riak_sysmon), - KillDamnFilterProc() - end, - webmachine, - os_mon, - lager, - fun(start) -> - _ = application:load(riak_core), - %% riak_core_handoff_listener uses {reusaddr, true}, but - %% sometimes we just restart too quickly and hit an - %% eaddrinuse when restarting riak_core? 
- timer:sleep(1000), - %% io:format(user, "DEBUGG: ~s\n", [os:cmd("netstat -na | egrep -vi 'stream|dgram'")]), - [begin - put({?MODULE,AppKey}, app_helper:get_env(riak_core, AppKey)), - ok = application:set_env(riak_core, AppKey, Val) - end || {AppKey, Val} <- Core_Settings], - ok = application:start(riak_core); - (stop) -> - ok = application:stop(riak_core), - [ok = application:set_env(riak_core, AppKey, get({?MODULE, AppKey})) - || {AppKey, _Val} <- Core_Settings]; - (fullstop) -> - _ = application:stop(riak_core) - end, - riak_pipe, - luke, - erlang_js, - inets, - mochiweb, - fun(start) -> - _ = application:load(riak_kv), - [begin - put({?MODULE,AppKey}, app_helper:get_env(riak_kv, AppKey)), - ok = application:set_env(riak_kv, AppKey, Val) - end || {AppKey, Val} <- KV_Settings], - ok = application:start(riak_kv); - (stop) -> - ok = application:stop(riak_kv), - net_kernel:stop(), - [ok = application:set_env(riak_kv, AppKey, get({?MODULE, AppKey})) - || {AppKey, _Val} <- KV_Settings]; - (fullstop) -> - _ = application:stop(riak_kv) - end]. - -do_dep_apps(fullstop) -> - lists:map(fun(A) when is_atom(A) -> _ = application:stop(A); - (F) -> F(fullstop) - end, lists:reverse(dep_apps())); -do_dep_apps(StartStop) -> - Apps = if StartStop == start -> dep_apps(); - StartStop == stop -> lists:reverse(dep_apps()) - end, - lists:map(fun(A) when is_atom(A) -> ok = application:StartStop(A); - (F) -> F(StartStop) - end, Apps). - -prepare_runtime() -> - fun() -> - do_dep_apps(fullstop), - timer:sleep(50), - do_dep_apps(start), - timer:sleep(50), - riak_core:wait_for_service(riak_kv), - riak_core:wait_for_service(riak_pipe), - [foo1, foo2] - end. - -teardown_runtime() -> - fun(_PrepareThingie) -> - do_dep_apps(stop), - timer:sleep(50) - end. +setup() -> + riak_kv_test_util:common_setup(?MODULE, fun configure/1). + +cleanup() -> + riak_kv_test_util:common_cleanup(?MODULE, fun configure/1). + +configure(load) -> + KVSettings = [{storage_backend, riak_kv_memory_backend}, + {test, true}, + {vnode_vclocks, true}, + {pb_ip, "0.0.0.0"}, + {pb_port, 0}, % arbitrary # + {map_js_vm_count, 4}, + {reduce_js_vm_count, 3}], + CoreSettings = [{handoff_ip, "0.0.0.0"}, + {handoff_port, 0}, + {ring_creation_size, 16}], + [ application:set_env(riak_core, K, V) || {K,V} <- CoreSettings ], + [ application:set_env(riak_kv, K, V) || {K,V} <- KVSettings ], + ok; + +configure(start) -> + riak_core:wait_for_service(riak_pipe); +configure(_) -> + ok. inputs_gen_seq(Pipe, Max, _Timeout) -> [riak_pipe:queue_work(Pipe, X) || X <- lists:seq(1, Max)], @@ -156,30 +62,6 @@ inputs_gen_bkeys_1(Pipe, {Bucket, Start, End}, _Timeout) -> riak_pipe:eoi(Pipe), ok. -setup_demo_test_() -> - {foreach, - prepare_runtime(), - teardown_runtime(), - [ - fun(_) -> - {"Setup demo test", - fun() -> - Num = 5, - {ok, C} = riak:local_client(), - [ok = C:put(riak_object:new( - <<"foonum">>, - list_to_binary("bar"++integer_to_list(X)), - X)) - || X <- lists:seq(1, Num)], - [{ok, _} = C:get(<<"foonum">>, - list_to_binary("bar"++integer_to_list(X))) - || X <- lists:seq(1, Num)], - ok - end} - end - ] - }. 
- compat_basic1_test_() -> IntsBucket = <<"foonum">>, ReduceSumFun = fun(Inputs, _) -> [lists:sum(Inputs)] end, @@ -187,8 +69,8 @@ compat_basic1_test_() -> LinkKey = <<"yo">>, {setup, - prepare_runtime(), - teardown_runtime(), + setup(), + cleanup(), fun(_) -> [ ?_test( @@ -211,9 +93,6 @@ compat_basic1_test_() -> %% This will trigger a traversal of IntsBucket, but %% because the query is empty, the MapReduce will %% traverse the bucket and send BKeys down the pipe. - %% AFAICT, the original Riak MapReduce will crash with - %% luke_flow errors if the query list is empty. This - %% new implementation will pass the BKeys as-is. {ok, BKeys} = riak_kv_mrc_pipe:mapred(IntsBucket, []), 5 = length(BKeys), @@ -379,8 +258,8 @@ compat_buffer_and_prereduce_test_() -> ReduceSumFun = fun(Inputs, _) -> [lists:sum(Inputs)] end, {setup, - prepare_runtime(), - teardown_runtime(), + setup(), + cleanup(), fun(_) -> [ ?_test( @@ -459,8 +338,8 @@ compat_javascript_test_() -> NotFoundBkey = {<<"does not">>, <<"exit">>}, {setup, - prepare_runtime(), - teardown_runtime(), + setup(), + cleanup(), fun(_) -> [ ?_test( @@ -588,6 +467,288 @@ compat_javascript_test_() -> ] end}. +dead_pipe_test_() -> + {setup, + setup(), + cleanup(), + fun(_) -> + [ + ?_test( + %% Verify that sending inputs to a pipe that has already + %% stopped raises an error (synchronous send) + begin + Spec = + [{map, {modfun, riak_kv_mapreduce, map_object_value}, + none, true}], + {{ok, Pipe}, _NumKeeps} = + riak_kv_mrc_pipe:mapred_stream(Spec), + riak_pipe:destroy(Pipe), + {error, Reason} = riak_kv_mrc_pipe:send_inputs( + Pipe, [{<<"foo">>, <<"bar">>}]), + %% Each vnode should have received the input, but + %% being unable to find the fitting process, returned + %% `worker_startup_failed` (and probably also printed + %% "fitting was gone before startup") + ?assert(lists:member(worker_startup_failed, Reason)) + end), + ?_test( + %% Verify that sending inputs to a pipe that has already + %% stopped raises an error (async send) + begin + Spec = + [{map, {modfun, riak_kv_mapreduce, map_object_value}, + none, true}], + {{ok, Pipe}, _NumKeeps} = + riak_kv_mrc_pipe:mapred_stream(Spec), + riak_pipe:destroy(Pipe), + %% this is a hack to make sure that the async sender + %% doesn't die immediately upon linking to the + %% already-dead builder + PipeB = Pipe#pipe{builder=spawn(fake_builder(self()))}, + {Sender, SenderRef} = + riak_kv_mrc_pipe:send_inputs_async( + PipeB, [{<<"foo">>, <<"bar">>}]), + receive + {'DOWN', SenderRef, process, Sender, Error} -> + {error, Reason} = Error + end, + %% let the fake builder shut down now + PipeB#pipe.builder ! test_over, + %% Each vnode should have received the input, but + %% being unable to find the fitting process, returned + %% `worker_startup_failed` (and probably also printed + %% "fitting was gone before startup") + ?assert(lists:member(worker_startup_failed, Reason)) + end) + ] + end}. + +fake_builder(TestProc) -> + fun() -> + Ref = erlang:monitor(process, TestProc), + receive + test_over -> + ok; + {'DOWN',Ref,process,TestProc,_} -> + ok + end + end. + +notfound_failover_test_() -> + IntsBucket = <<"foonum">>, + NumInts = 5, + + {setup, + setup(), + cleanup(), + fun(_) -> + [ + ?_test( + %% The data created by this step is used by all/most of the + %% following tests. 
+ ok = riak_kv_mrc_pipe:example_setup(NumInts) + ), + ?_test( + %% check the condition that used to bring down a pipe in + %% https://github.com/basho/riak_kv/issues/290 + %% this version checks it with an actual not-found + begin + QLimit = 3, + WaitRef = make_ref(), + Spec = + [{map, + {modfun, riak_kv_mapreduce, map_object_value}, + <<"include_keydata">>, false}, + {reduce, + {modfun, ?MODULE, reduce_wait_for_signal}, + [{reduce_phase_batch_size, 1}, + {wait, {self(), WaitRef}}], + true}], + PipeSpec = riak_kv_mrc_pipe:mapred_plan(Spec), + %% make it easier to fill + SmallPipeSpec = [ S#fitting_spec{q_limit=QLimit} + || S <- PipeSpec ], + {ok, Pipe} = riak_pipe:exec(SmallPipeSpec, + [{log, sink}, + {trace, [error, queue_full]}]), + ExistingKey = {IntsBucket, <<"bar1">>}, + ChashFun = (hd(SmallPipeSpec))#fitting_spec.chashfun, + MissingKey = find_adjacent_key(ChashFun, ExistingKey), + %% get main workers spun up + ok = riak_pipe:queue_work(Pipe, ExistingKey), + receive {waiting, WaitRef, ReducePid} -> ok end, + + %% reduce is now blocking, fill its queue + [ ok = riak_pipe:queue_work(Pipe, ExistingKey) + || _ <- lists:seq(1, QLimit) ], + + {NValMod,NValFun} = (hd(SmallPipeSpec))#fitting_spec.nval, + NVal = NValMod:NValFun(ExistingKey), + + %% each of N paths through the primary preflist + [ fill_map_queue(Pipe, QLimit, ExistingKey) + || _ <- lists:seq(1, NVal) ], + + %% check get queue actually full + ExpectedTOs = lists:duplicate(NVal, timeout), + {error, ExpectedTOs} = + riak_pipe:queue_work(Pipe, ExistingKey, noblock), + + %% now inject a missing key that would need to + %% failover to the full queue + ok = riak_pipe:queue_work(Pipe, {MissingKey, test_passing}), + %% and watch for it to block in the reduce queue + %% *this* is when pre-patched code would fail: + %% we'll receive an [error] trace from the kvget fitting's + %% failure to forward the bkey along its preflist + ok = consume_queue_full(Pipe, 1), + + %% let the pipe finish + riak_pipe:eoi(Pipe), + ReducePid ! 
{continue, WaitRef},
+
+                   {eoi, Results, Logs} = riak_pipe:collect_results(Pipe),
+                   %% the object does not exist, but we told the map
+                   %% phase to send on its keydata - check for it
+                   ?assert(lists:member({1, test_passing}, Results)),
+                   %% just to be a little extra cautious, check for
+                   %% other errors
+                   ?assertEqual([], [E || {_,{trace,[error],_}}=E <- Logs])
+               end),
+        ?_test(
+              %% check the condition that used to bring down a pipe in
+              %% https://github.com/basho/riak_kv/issues/290
+              %% this version checks with an object that is missing a replica
+              begin
+                   QLimit = 3,
+                   WaitRef = make_ref(),
+                   Spec =
+                       [{map,
+                         {modfun, riak_kv_mapreduce, map_object_value},
+                         none, false},
+                        {reduce,
+                         {modfun, ?MODULE, reduce_wait_for_signal},
+                         [{reduce_phase_batch_size, 1},
+                          {wait, {self(), WaitRef}}],
+                         true}],
+                   PipeSpec = riak_kv_mrc_pipe:mapred_plan(Spec),
+                   %% make it easier to fill
+                   SmallPipeSpec = [ S#fitting_spec{q_limit=QLimit}
+                                     || S <- PipeSpec ],
+                   {ok, Pipe} = riak_pipe:exec(SmallPipeSpec,
+                                               [{log, sink},
+                                                {trace, [error, queue_full]}]),
+                   ExistingKey = {IntsBucket, <<"bar1">>},
+                   ChashFun = (hd(SmallPipeSpec))#fitting_spec.chashfun,
+                   {MissingBucket, MissingKey} =
+                       find_adjacent_key(ChashFun, ExistingKey),
+
+                   %% create a value for the "missing" key
+                   {ok, C} = riak:local_client(),
+                   ok = C:put(riak_object:new(MissingBucket, MissingKey,
+                                              test_passing),
+                              3),
+                   %% and now kill the first replica;
+                   %% this will make the vnode local to the kvget pipe
+                   %% fitting return an error (because it's the memory
+                   %% backend), so it will have to look at another kv vnode
+                   [{{PrimaryIndex, _},_}] =
+                       riak_core_apl:get_primary_apl(
+                         ChashFun({MissingBucket, MissingKey}), 1, riak_kv),
+                   {ok, VnodePid} = riak_core_vnode_manager:get_vnode_pid(
+                                      PrimaryIndex, riak_kv_vnode),
+                   exit(VnodePid, kill),
+
+                   %% get main workers spun up
+                   ok = riak_pipe:queue_work(Pipe, ExistingKey),
+                   receive {waiting, WaitRef, ReducePid} -> ok end,
+
+                   %% reduce is now blocking, fill its queue
+                   [ ok = riak_pipe:queue_work(Pipe, ExistingKey)
+                     || _ <- lists:seq(1, QLimit) ],
+
+                   {NValMod,NValFun} = (hd(SmallPipeSpec))#fitting_spec.nval,
+                   NVal = NValMod:NValFun(ExistingKey),
+
+                   %% each of N paths through the primary preflist
+                   [ fill_map_queue(Pipe, QLimit, ExistingKey)
+                     || _ <- lists:seq(1, NVal) ],
+
+                   %% check get queue actually full
+                   ExpectedTOs = lists:duplicate(NVal, timeout),
+                   {error, ExpectedTOs} =
+                       riak_pipe:queue_work(Pipe, ExistingKey, noblock),
+
+                   %% now inject a missing key that would need to
+                   %% failover to the full queue
+                   ok = riak_pipe:queue_work(Pipe, {MissingBucket, MissingKey}),
+                   %% and watch for it to block in the reduce queue
+                   %% *this* is when pre-patched code would fail:
+                   %% we'll receive an [error] trace from the kvget fitting's
+                   %% failure to forward the bkey along its preflist
+                   ok = consume_queue_full(Pipe, 1),
+
+                   %% let the pipe finish
+                   riak_pipe:eoi(Pipe),
+                   ReducePid ! {continue, WaitRef},
+
+                   {eoi, Results, Logs} = riak_pipe:collect_results(Pipe),
+                   %% this time the object does exist (we created it, then
+                   %% killed one replica); check that its value made it
+                   %% through the map phase
+                   ?assert(lists:member({1, test_passing}, Results)),
+                   %% just to be a little extra cautious, check for
+                   %% other errors
+                   ?assertEqual([], [E || {_,{trace,[error],_}}=E <- Logs])
+               end)
+        ] end}.
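One detail worth spelling out in both tests above is the {error, ExpectedTOs} match: lists:duplicate(NVal, timeout) builds one timeout per primary preflist path, so with the default n_val of 3 a noblock enqueue against the saturated pipe is expected to time out on all three paths at once:

    %% With NVal = 3:
    ExpectedTOs = lists:duplicate(NVal, timeout),   %% [timeout,timeout,timeout]
    {error, ExpectedTOs} = riak_pipe:queue_work(Pipe, ExistingKey, noblock).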
+ +fill_map_queue(Pipe, QLimit, ExistingKey) -> + %% give the map worker one more to block on + ok = riak_pipe:queue_work(Pipe, ExistingKey, noblock), + consume_queue_full(Pipe, 1), + %% map is now blocking, fill its queue + [ ok = riak_pipe:queue_work(Pipe, ExistingKey, noblock) + || _ <- lists:seq(1, QLimit) ], + %% give the get worker one more to block on + ok = riak_pipe:queue_work(Pipe, ExistingKey, noblock), + consume_queue_full(Pipe, {xform_map, 0}), + %% get is now blocking, fill its queue + [ ok = riak_pipe:queue_work(Pipe, ExistingKey, noblock) + || _ <- lists:seq(1, QLimit) ], + ok. + +find_adjacent_key({Mod, Fun}, ExistingKey) -> + [ExistingHead|_] = riak_core_apl:get_primary_apl( + Mod:Fun(ExistingKey), 2, riak_kv), + [K|_] = lists:dropwhile( + fun(N) -> + K = {<<"foonum_missing">>, + list_to_binary(integer_to_list(N))}, + [_,Second] = riak_core_apl:get_primary_apl( + Mod:Fun(K), 2, riak_kv), + Second /= ExistingHead + end, + lists:seq(1, 1000)), + {<<"foonum_missing">>, list_to_binary(integer_to_list(K))}. + +consume_queue_full(Pipe, FittingName) -> + {log, {FittingName, {trace, [queue_full], _}}} = + riak_pipe:receive_result(Pipe, 5000), + ok. + +reduce_wait_for_signal(Inputs, Args) -> + case get(waited) of + true -> + Inputs; + _ -> + {TestProc, WaitRef} = proplists:get_value(wait, Args), + TestProc ! {waiting, WaitRef, self()}, + receive {continue, WaitRef} -> ok end, + put(waited, true), + Inputs + end. + wait_until_dead(Pid) when is_pid(Pid) -> Ref = monitor(process, Pid), receive diff --git a/test/put_fsm_eqc.erl b/test/put_fsm_eqc.erl index 000600eac9..3d03faf0f4 100644 --- a/test/put_fsm_eqc.erl +++ b/test/put_fsm_eqc.erl @@ -111,7 +111,7 @@ setup() -> application:load(lager), application:set_env(lager, handlers, [{lager_file_backend, [{?LAGER_LOGFILE, info, 10485760,"$D0",5}]}]), - ok = application:start(lager), + ok = lager:start(), %% Start up mock servers and dependencies fsm_eqc_util:start_mock_servers(), From 921f1763506586a886d04612c2f2a8fc14d916d0 Mon Sep 17 00:00:00 2001 From: ricardobcl Date: Sat, 24 Nov 2012 22:00:18 +0000 Subject: [PATCH 06/25] Correct version of riak_object.erl --- src/riak_object.erl | 39 +++++++++++++++++++++++++-------------- 1 file changed, 25 insertions(+), 14 deletions(-) diff --git a/src/riak_object.erl b/src/riak_object.erl index 4f9d0117a4..2d963f4992 100644 --- a/src/riak_object.erl +++ b/src/riak_object.erl @@ -36,7 +36,7 @@ -type value() :: term(). -record(r_content, { - metadata :: dict(), + metadata :: dict(), value :: term(), dvvclock :: dottedvv:dottedvv() }). @@ -56,7 +56,7 @@ -define(MAX_KEY_SIZE, 65536). --export([new/3, new/4, ensure_robject/1, equal/2, reconcile/2]). +-export([new/3, new/4, ensure_robject/1, equal/2, reconcile/3, reconcile/2]). -export([increment_vclock/2, increment_vclock/3, update_vclock/3]). -export([key/1, get_metadata/1, get_metadatas/1, get_values/1, get_value/1]). -export([vclock/1, update_value/2, update_metadata/2, bucket/1, value_count/1]). @@ -139,8 +139,9 @@ equal_contents([C1|R1],[C2|R2]) -> % merged). If AllowMultiple is false, the riak_object returned will % contain the value of the most-recently-updated object, as per the % X-Riak-Last-Modified header. -reconcile(Objects, AllowMultiple) -> - RObjs = reconcile(Objects), +reconcile(Objs, AllowMultiple) -> reconcile(Objs, [], AllowMultiple). 
+reconcile(Current, New, AllowMultiple) ->
+    RObjs = reconcile_sync(Current, New),
     AllContents = lists:flatten([O#r_object.contents || O <- RObjs]),
     Contents = case AllowMultiple of
                    false ->
@@ -163,11 +164,17 @@
+
+
 %% @spec reconcile([riak_object()]) -> [riak_object()]
-reconcile(Objects) ->
-    AllClocks = [vclock(O) || O <- Objects],
-    SyncClocks = lists:foldl(fun(X,Y) -> dottedvv:sync(X,Y) end, dottedvv:fresh(), AllClocks),
+reconcile_sync(Current, New) ->
+    ClockNew = vclock(New),
+    {Curr, ClocksCurrent} =
+        case is_list(Current) of
+            true -> {Current, lists:flatten([vclock(O) || O <- Current])};
+            false -> {[Current], vclock(Current)}
+        end,
+    SyncClocks = dottedvv:sync(ClocksCurrent, ClockNew),
+    AllObjs = Curr ++ [New],
     Objs =
-        [[Obj || Obj <- Objects, dottedvv:descends(vclock(Obj), C)]
+        [[Obj || Obj <- AllObjs, (dottedvv:descends(vclock(Obj), C))]
         || C <- SyncClocks],
     remove_duplicate_objects(lists:flatten(Objs)).
 
@@ -245,6 +252,7 @@ key(#r_object{key=Key}) -> Key.
 
 %% @spec vclock(riak_object()) -> [dottedvv:dottedvv()]
 %% @doc Return the dotted version vector(s) for this riak_object.
+vclock([]) -> {};
 vclock(#r_object{contents=C}) -> [Content#r_content.dvvclock || Content <- C].
 
@@ -417,10 +425,13 @@ from_json(Obj) ->
 
 jsonify_metadata(MD) ->
     MDJS = fun({LastMod, Now={_,_,_}}) ->
-                   % convert Now to JS-readable time string
+                   %% convert Now to JS-readable time string
                    {LastMod, list_to_binary(
                                httpd_util:rfc1123_date(
                                  calendar:now_to_local_time(Now)))};
+              %% When the user metadata is empty, it should still be a struct
+              ({?MD_USERMETA, []}) ->
+                   {?MD_USERMETA, {struct, []}};
               ({<<"Links">>, Links}) ->
                    {<<"Links">>, [ [B, K, T] || {{B, K}, T} <- Links ]};
               ({Name, List=[_|_]}) ->
@@ -478,7 +489,7 @@ dejsonify_values([{<<"metadata">>, {struct, MD0}},
                       <<"Links">> ->
                           {Key, [{{B, K}, Tag} || [B, K, Tag] <- Val]};
                       <<"X-Riak-Last-Modified">> ->
-                          {Key, erlang:now()};
+                          {Key, os:timestamp()};
                       _ ->
                           {Key,
                            if
                                is_binary(Val) ->
@@ -529,7 +540,7 @@ syntactic_merge(CurrentObject, NewObject) ->
                       false -> CurrentObject
                   end,
-    reconcile([UpdatedNew, UpdatedCurr], true).
+    reconcile(UpdatedCurr, UpdatedNew, true).
 
 -ifdef(TEST).
 
@@ -557,8 +568,8 @@ update_test() ->
 reconcile_test() ->
     {O,O2} = update_test(),
     O3 = riak_object:increment_vclock(O2,self()),
-    O3 = riak_object:reconcile([O,O3],true),
-    O3 = riak_object:reconcile([O,O3],false),
+    O3 = riak_object:reconcile(O,O3,true),
+    O3 = riak_object:reconcile(O,O3,false),
     {O,O3}.
 
 merge1_test() ->
@@ -688,7 +699,7 @@ date_reconcile_test() ->
                 httpd_util:rfc1123_date(
                   calendar:gregorian_seconds_to_datetime(D+1)),
                 get_metadata(O3)))),
-    O5 = riak_object:reconcile([O2,O4], false),
+    O5 = riak_object:reconcile(O2,O4, false),
     false = riak_object:equal(O2, O5),
     false = riak_object:equal(O4, O5).
 
From 2187ee1b22d1e6f56b1b3fca968e7d29e0448143 Mon Sep 17 00:00:00 2001
From: ricardobcl
Date: Sat, 24 Nov 2012 22:18:22 +0000
Subject: [PATCH 07/25] Use my fork of riak_core with support for dotted version vectors

---
 rebar.config | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/rebar.config b/rebar.config
index d2924d233c..9d1211000d 100644
--- a/rebar.config
+++ b/rebar.config
@@ -9,7 +9,7 @@
 ]}.
{deps, [ - {riak_core, ".*", {git, "git://github.com/basho/riak_core", "master"}}, + {riak_core, ".*", {git, "git://github.com/ricardobcl/riak_core", "master"}}, {erlang_js, ".*", {git, "git://github.com/basho/erlang_js", "master"}}, {bitcask, ".*", {git, "git://github.com/basho/bitcask", "master"}}, {merge_index, ".*", {git, "git://github.com/basho/merge_index", From fcc0578563bffc73e72a5acf9dc6b29753a7185c Mon Sep 17 00:00:00 2001 From: ricardobcl Date: Tue, 27 Nov 2012 17:52:59 +0000 Subject: [PATCH 08/25] Script to run dstat --- dstat_script.sh | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 dstat_script.sh diff --git a/dstat_script.sh b/dstat_script.sh new file mode 100644 index 0000000000..e694cc78ac --- /dev/null +++ b/dstat_script.sh @@ -0,0 +1,2 @@ +#!/bin/bash +dstat --output /home/gsd/tome/dstat_riak.csv -talms 1 30000 > /dev/null From 7c6f4d79677a9147312387fb7c68254033eb1b4c Mon Sep 17 00:00:00 2001 From: ricardobcl Date: Fri, 30 Nov 2012 15:59:45 +0000 Subject: [PATCH 09/25] Deleted dstat_script.sh file --- dstat_script.sh | 2 -- 1 file changed, 2 deletions(-) delete mode 100644 dstat_script.sh diff --git a/dstat_script.sh b/dstat_script.sh deleted file mode 100644 index e694cc78ac..0000000000 --- a/dstat_script.sh +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/bash -dstat --output /home/gsd/tome/dstat_riak.csv -talms 1 30000 > /dev/null From d82b865ef73e5442c7a910739ca86b9bd5203118 Mon Sep 17 00:00:00 2001 From: ricardobcl Date: Sat, 1 Dec 2012 17:05:25 +0000 Subject: [PATCH 10/25] Corrected duplicate riak_kv_index_req_v2 in riak_kv_vnode.hrl --- include/riak_kv_vnode.hrl | 6 ------ 1 file changed, 6 deletions(-) diff --git a/include/riak_kv_vnode.hrl b/include/riak_kv_vnode.hrl index e0da2175e7..56adf0850f 100644 --- a/include/riak_kv_vnode.hrl +++ b/include/riak_kv_vnode.hrl @@ -39,12 +39,6 @@ item_filter :: riak_kv_coverage_filter:filter(), qry :: riak_index:query_def()}). -%% same as _v1, but triggers ack-based backpressure --record(riak_kv_index_req_v2, { - bucket :: binary() | tuple(), - item_filter :: function(), - qry :: riak_index:query_def()}). - -record(riak_kv_vnode_status_req_v1, {}). 
-record(riak_kv_delete_req_v1, { From 94aaa7f81b3a8ac6a65f383f2ae4dffcacf4754f Mon Sep 17 00:00:00 2001 From: ricardobcl Date: Wed, 12 Dec 2012 22:07:14 +0000 Subject: [PATCH 11/25] Delete extra files that were generated from mergetool --- src/riak_kv.app.src.orig | 55 -- src/riak_kv_app.erl.orig | 209 ----- src/riak_kv_get_core.erl.orig | 232 ----- src/riak_kv_get_fsm.erl.orig | 680 -------------- src/riak_kv_mrc_pipe.erl.orig | 1062 --------------------- src/riak_kv_mrc_sink.erl.orig | 442 --------- src/riak_kv_put_fsm.erl.orig | 886 ------------------ src/riak_kv_stat.erl.orig | 383 -------- src/riak_kv_stat_bc.erl.orig | 417 --------- src/riak_kv_sup.erl.orig | 139 --- src/riak_kv_util.erl.orig | 266 ------ src/riak_kv_vnode.erl.orig | 1651 --------------------------------- test/fsm_eqc_util.erl.orig | 320 ------- test/keys_fsm_eqc.erl.orig | 221 ----- 14 files changed, 6963 deletions(-) delete mode 100644 src/riak_kv.app.src.orig delete mode 100644 src/riak_kv_app.erl.orig delete mode 100644 src/riak_kv_get_core.erl.orig delete mode 100644 src/riak_kv_get_fsm.erl.orig delete mode 100644 src/riak_kv_mrc_pipe.erl.orig delete mode 100644 src/riak_kv_mrc_sink.erl.orig delete mode 100644 src/riak_kv_put_fsm.erl.orig delete mode 100644 src/riak_kv_stat.erl.orig delete mode 100644 src/riak_kv_stat_bc.erl.orig delete mode 100644 src/riak_kv_sup.erl.orig delete mode 100644 src/riak_kv_util.erl.orig delete mode 100644 src/riak_kv_vnode.erl.orig delete mode 100644 test/fsm_eqc_util.erl.orig delete mode 100644 test/keys_fsm_eqc.erl.orig diff --git a/src/riak_kv.app.src.orig b/src/riak_kv.app.src.orig deleted file mode 100644 index c0ab06cb7b..0000000000 --- a/src/riak_kv.app.src.orig +++ /dev/null @@ -1,55 +0,0 @@ -%% -*- tab-width: 4;erlang-indent-level: 4;indent-tabs-mode: nil -*- -%% ex: ts=4 sw=4 et -{application, riak_kv, - [ - {description, "Riak Key/Value Store"}, -<<<<<<< HEAD - {vsn, "1.2.1"}, -======= - {vsn, "1.2.1p3"}, ->>>>>>> master - {applications, [ - kernel, - stdlib, - sasl, - crypto, - riak_api, - riak_core, - erlang_js, - mochiweb, - webmachine, - os_mon, - riak_pipe - ]}, - {registered, []}, - {mod, {riak_kv_app, []}}, - {env, [ - %% Endpoint for system stats HTTP provider - {stats_urlpath, "stats"}, - - %% Secondary code paths - {add_paths, []}, - - %% This option toggles compatibility of keylisting with 1.0 - %% and earlier versions. Once a rolling upgrade to a version - %% > 1.0 is completed for a cluster, this should be set to - %% true for better control of memory usage during key listing - %% operations - {listkeys_backpressure, false}, - - %% use the legacy routines for tracking kv stats - {legacy_stats, true}, - - %% Enable active anti-entropy - {anti_entropy, {on, []}}, - - %% Allow Erlang MapReduce functions to be specified as - %% strings. - %% - %% !!!WARNING!!! - %% This will allow arbitrary Erlang code to be submitted - %% through the REST and Protocol Buffers interfaces. This - %% should only be used for development purposes. - {allow_strfun, false} - ]} - ]}. diff --git a/src/riak_kv_app.erl.orig b/src/riak_kv_app.erl.orig deleted file mode 100644 index ea8ddf7e79..0000000000 --- a/src/riak_kv_app.erl.orig +++ /dev/null @@ -1,209 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% riak_app: application startup for Riak -%% -%% Copyright (c) 2007-2010 Basho Technologies, Inc. All Rights Reserved. 
-%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. - -%% @doc Bootstrapping the Riak application. - --module(riak_kv_app). - --behaviour(application). --export([start/2, prep_stop/1, stop/1]). - --define(SERVICES, [{riak_kv_pb_object, 3, 6}, %% ClientID stuff - {riak_kv_pb_object, 9, 14}, %% Object requests - {riak_kv_pb_bucket, 15, 22}, %% Bucket requests - {riak_kv_pb_mapred, 23, 24}, %% MapReduce requests - {riak_kv_pb_index, 25, 26} %% Secondary index requests - ]). - -%% @spec start(Type :: term(), StartArgs :: term()) -> -%% {ok,Pid} | ignore | {error,Error} -%% @doc The application:start callback for riak. -%% Arguments are ignored as all configuration is done via the erlenv file. -start(_Type, _StartArgs) -> - riak_core_util:start_app_deps(riak_kv), - - %% Look at the epoch and generating an error message if it doesn't match up - %% to our expectations - check_epoch(), - - %% Append user-provided code paths - case app_helper:get_env(riak_kv, add_paths) of - List when is_list(List) -> - ok = code:add_paths(List); - _ -> - ok - end, - - %% Append defaults for riak_kv buckets to the bucket defaults - %% TODO: Need to revisit this. Buckets are typically created - %% by a specific entity; seems lame to append a bunch of unused - %% metadata to buckets that may not be appropriate for the bucket. - riak_core_bucket:append_bucket_defaults( - [{linkfun, {modfun, riak_kv_wm_link_walker, mapreduce_linkfun}}, - {old_vclock, 86400}, - {young_vclock, 20}, - {big_vclock, 50}, - {small_vclock, 50}, - {pr, 0}, - {r, quorum}, - {w, quorum}, - {pw, 0}, - {dw, quorum}, - {rw, quorum}, - {basic_quorum, false}, - {notfound_ok, true} - ]), - - %% Check the storage backend - StorageBackend = app_helper:get_env(riak_kv, storage_backend), - case code:ensure_loaded(StorageBackend) of - {error,nofile} -> - lager:critical("storage_backend ~p is non-loadable.", - [StorageBackend]), - throw({error, invalid_storage_backend}); - _ -> - ok - end, - - %% Register our cluster_info app callback modules, with catch if - %% the app is missing or packaging is broken. 
- catch cluster_info:register_app(riak_kv_cinfo), - - %% Spin up supervisor - case riak_kv_sup:start_link() of - {ok, Pid} -> - %% Register capabilities - riak_core_capability:register({riak_kv, vnode_vclocks}, - [true, false], - false, - {riak_kv, - vnode_vclocks, - [{true, true}, {false, false}]}), - - riak_core_capability:register({riak_kv, legacy_keylisting}, - [false], - false, - {riak_kv, - legacy_keylisting, - [{false, false}]}), - - riak_core_capability:register({riak_kv, listkeys_backpressure}, - [true, false], - false, - {riak_kv, - listkeys_backpressure, - [{true, true}, {false, false}]}), - - riak_core_capability:register({riak_kv, index_backpressure}, - [true, false], - false), - - %% mapred_system should remain until no nodes still exist - %% that would propose 'legacy' as the default choice - riak_core_capability:register({riak_kv, mapred_system}, - [pipe], - pipe, - {riak_kv, - mapred_system, - [{pipe, pipe}]}), - - riak_core_capability:register({riak_kv, mapred_2i_pipe}, - [true, false], - false, - {riak_kv, - mapred_2i_pipe, - [{true, true}, {false, false}]}), - -<<<<<<< HEAD -======= - riak_core_capability:register({riak_kv, anti_entropy}, - [enabled_v1, disabled], - disabled), - ->>>>>>> master - %% Go ahead and mark the riak_kv service as up in the node watcher. - %% The riak_core_ring_handler blocks until all vnodes have been started - %% synchronously. - riak_core:register(riak_kv, [ - {vnode_module, riak_kv_vnode}, - {bucket_validator, riak_kv_bucket}, - {stat_mod, riak_kv_stat} - ]), - - ok = riak_api_pb_service:register(?SERVICES), - - %% Add routes to webmachine - [ webmachine_router:add_route(R) - || R <- lists:reverse(riak_kv_web:dispatch_table()) ], - {ok, Pid}; - {error, Reason} -> - {error, Reason} - end. - -%% @doc Prepare to stop - called before the supervisor tree is shutdown -prep_stop(_State) -> - try %% wrap with a try/catch - application carries on regardless, - %% no error message or logging about the failure otherwise. - - lager:info("Stopping application riak_kv - marked service down.\n", []), - riak_core_node_watcher:service_down(riak_kv) - - %% TODO: Gracefully unregister riak_kv webmachine endpoints. - %% Cannot do this currently as it calls application:set_env while this function - %% is itself inside of application controller. webmachine really needs it's own - %% ETS table for dispatch information. - %%[ webmachine_router:remove_route(R) || R <- riak_kv_web:dispatch_table() ], - catch - Type:Reason -> - lager:error("Stopping application riak_api - ~p:~p.\n", [Type, Reason]) - end, - stopping. - -%% @spec stop(State :: term()) -> ok -%% @doc The application:stop callback for riak. -stop(_State) -> - ok = riak_api_pb_service:deregister(?SERVICES), - lager:info("Stopped application riak_kv.\n", []), - ok. - -%% 719528 days from Jan 1, 0 to Jan 1, 1970 -%% *86400 seconds/day --define(SEC_TO_EPOCH, 62167219200). 
- -%% @spec check_epoch() -> ok -%% @doc -check_epoch() -> - %% doc for erlang:now/0 says return value is platform-dependent - %% -> let's emit an error if this platform doesn't think the epoch - %% is Jan 1, 1970 - {MSec, Sec, _} = os:timestamp(), - GSec = calendar:datetime_to_gregorian_seconds( - calendar:universal_time()), - case GSec - ((MSec*1000000)+Sec) of - N when (N < ?SEC_TO_EPOCH+5 andalso N > ?SEC_TO_EPOCH-5); - (N < -?SEC_TO_EPOCH+5 andalso N > -?SEC_TO_EPOCH-5) -> - %% if epoch is within 10 sec of expected, accept it - ok; - N -> - Epoch = calendar:gregorian_seconds_to_datetime(N), - lager:error("Riak expects your system's epoch to be Jan 1, 1970," - "but your system says the epoch is ~p", [Epoch]), - ok - end. diff --git a/src/riak_kv_get_core.erl.orig b/src/riak_kv_get_core.erl.orig deleted file mode 100644 index ae0ab02618..0000000000 --- a/src/riak_kv_get_core.erl.orig +++ /dev/null @@ -1,232 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% riak_kv_get_core: Riak get logic -%% -%% Copyright (c) 2007-2010 Basho Technologies, Inc. All Rights Reserved. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- --module(riak_kv_get_core). --export([init/6, add_result/3, result_shortcode/1, enough/1, response/1, - has_all_results/1, final_action/1, info/1]). --export_type([getcore/0, result/0, reply/0, final_action/0]). - --type result() :: {ok, riak_object:riak_object()} | - {error, notfound} | % for dialyzer - {error, any()}. --type reply() :: {ok, riak_object:riak_object()} | - {error, notfound} | - {error, any()}. --type repair_reason() :: notfound | outofdate. --type final_action() :: nop | - {read_repair, [{non_neg_integer() | repair_reason()}], riak_object:riak_object()} | - delete. --type idxresult() :: {non_neg_integer(), result()}. - --record(getcore, {n :: pos_integer(), - r :: pos_integer(), - fail_threshold :: pos_integer(), - notfound_ok :: boolean(), - allow_mult :: boolean(), - deletedvclock :: boolean(), - results = [] :: [idxresult()], - merged :: {notfound | tombstone | ok, - riak_object:riak_object() | undefined}, - num_ok = 0 :: non_neg_integer(), - num_notfound = 0 :: non_neg_integer(), - num_fail = 0 :: non_neg_integer()}). --opaque getcore() :: #getcore{}. - -%% ==================================================================== -%% Public API -%% ==================================================================== - -%% Initialize a get and return an opaque get core context --spec init(pos_integer(), pos_integer(), pos_integer(), boolean(), boolean(), - boolean()) -> getcore(). -init(N, R, FailThreshold, NotFoundOk, AllowMult, DeletedVClock) -> - #getcore{n = N, - r = R, - fail_threshold = FailThreshold, - notfound_ok = NotFoundOk, - allow_mult = AllowMult, - deletedvclock = DeletedVClock}. - -%% Add a result for a vnode index --spec add_result(non_neg_integer(), result(), getcore()) -> getcore(). 
-add_result(Idx, Result, GetCore = #getcore{results = Results}) -> - UpdResults = [{Idx, Result} | Results], - case Result of - {ok, _RObj} -> - GetCore#getcore{results = UpdResults, merged = undefined, - num_ok = GetCore#getcore.num_ok + 1}; - {error, notfound} -> - case GetCore#getcore.notfound_ok of - true -> - GetCore#getcore{results = UpdResults, merged = undefined, - num_ok = GetCore#getcore.num_ok + 1}; - _ -> - GetCore#getcore{results = UpdResults, merged = undefined, - num_notfound = GetCore#getcore.num_notfound + 1} - end; - {error, _Reason} -> - GetCore#getcore{results = UpdResults, merged = undefined, - num_fail = GetCore#getcore.num_fail + 1} - end. - -result_shortcode({ok, _RObj}) -> 1; -result_shortcode({error, notfound}) -> 0; -result_shortcode(_) -> -1. - -%% Check if enough results have been added to respond --spec enough(getcore()) -> boolean(). -enough(#getcore{r = R, num_ok = NumOk, - num_notfound = NumNotFound, - num_fail = NumFail, - fail_threshold = FailThreshold}) -> - if - NumOk >= R -> - true; - NumNotFound + NumFail >= FailThreshold -> - true; - true -> - false - end. - -%% Get success/fail response once enough results received --spec response(getcore()) -> {reply(), getcore()}. -response(GetCore = #getcore{r = R, num_ok = NumOk, num_notfound = NumNotFound, - results = Results, allow_mult = AllowMult, - deletedvclock = DeletedVClock}) -> - {ObjState, MObj} = Merged = merge(Results, AllowMult), - Reply = case NumOk >= R of - true -> - case ObjState of - ok -> - Merged; % {ok, MObj} - tombstone when DeletedVClock -> - {error, {deleted, riak_object:vclock(MObj)}}; - _ -> % tombstone or notfound - {error, notfound} - end; - false -> - DelObjs = length([xx || {_Idx, {ok, RObj}} <- Results, - riak_kv_util:is_x_deleted(RObj)]), - Fails = [F || F = {_Idx, {error, Reason}} <- Results, - Reason /= notfound], - fail_reply(R, NumOk, NumOk - DelObjs, - NumNotFound + DelObjs, Fails) - end, - {Reply, GetCore#getcore{merged = Merged}}. - -%% Check if all expected results have been added --spec has_all_results(getcore()) -> boolean(). -has_all_results(#getcore{n = N, num_ok = NOk, - num_fail = NFail, num_notfound = NNF}) -> - NOk + NFail + NNF >= N. - -%% Decide on any post-response actions -%% nop - do nothing -%% {readrepair, Indices, MObj} - send read repairs iff any vnode has ancestor data -%% (including tombstones) -%% delete - issue deletes if all vnodes returned tombstones. This needs to be -%% supplemented with a check that the vnodes were all primaries. -%% --spec final_action(getcore()) -> {final_action(), getcore()}. -final_action(GetCore = #getcore{n = N, merged = Merged0, results = Results, - allow_mult = AllowMult}) -> - Merged = case Merged0 of - undefined -> - merge(Results, AllowMult); - _ -> - Merged0 - end, - {ObjState, MObj} = Merged, - ReadRepairs = case ObjState of - notfound -> - []; - _ -> % ok or tombstone - [{Idx, outofdate} || {Idx, {ok, RObj}} <- Results, - strict_descendant(MObj, RObj)] ++ - [{Idx, notfound} || {Idx, {error, notfound}} <- Results] - end, - Action = case ReadRepairs of - [] when ObjState == tombstone -> - %% Allow delete if merge object is deleted, - %% there are no read repairs pending and - %% a value was received from all vnodes - case riak_kv_util:is_x_deleted(MObj) andalso - length([xx || {_Idx, {ok, _RObj}} <- Results]) == N of - true -> - delete; - _ -> - nop - end; - [] -> - nop; - _ -> - {read_repair, ReadRepairs, MObj} - end, - {Action, GetCore#getcore{merged = Merged}}. 
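To make the accounting above concrete, here is a sketch (not from the
patch) of an N=3, R=2 get where the second vnode misses, using this
module's API with notfound_ok=false and a fail threshold of N-R+1=2;
Obj is a placeholder for any riak_object fetched from a vnode:

    GC0 = riak_kv_get_core:init(3, 2, 2, false, true, false),
    GC1 = riak_kv_get_core:add_result(1, {ok, Obj}, GC0),
    false = riak_kv_get_core:enough(GC1),  %% 1 ok < R=2, 0 misses < threshold 2
    GC2 = riak_kv_get_core:add_result(2, {error, notfound}, GC1),
    false = riak_kv_get_core:enough(GC2),  %% notfound_ok=false: still 1 ok, 1 miss
    GC3 = riak_kv_get_core:add_result(3, {ok, Obj}, GC2),
    true = riak_kv_get_core:enough(GC3),   %% 2 oks >= R=2
    {{ok, _Merged}, _GC4} = riak_kv_get_core:response(GC3).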
- -%% Return request info --spec info(undefined | getcore()) -> [{vnode_oks, non_neg_integer()} | - {vnode_errors, [any()]}]. - -info(undefined) -> - []; % make uninitialized case easier -info(#getcore{num_ok = NumOks, num_fail = NumFail, results = Results}) -> - Oks = [{vnode_oks, NumOks}], - case NumFail of - 0 -> - Oks; - _ -> - Errors = [Reason || {_Idx, {error, Reason}} <- Results, - Reason /= undefined], - [{vnode_errors, Errors} | Oks] - end. - -%% ==================================================================== -%% Internal functions -%% ==================================================================== - -strict_descendant(O1, O2) -> -<<<<<<< HEAD - dottedvv:strict_descends(riak_object:vclock(O1),riak_object:vclock(O2)). -======= - vclock:descends(riak_object:vclock(O1),riak_object:vclock(O2)) andalso - not vclock:descends(riak_object:vclock(O2),riak_object:vclock(O1)). ->>>>>>> master - -merge(Replies, AllowMult) -> - RObjs = [RObj || {_I, {ok, RObj}} <- Replies], - case RObjs of - [] -> - {notfound, undefined}; - _ -> - Merged = riak_object:reconcile(RObjs, AllowMult), % include tombstones - case riak_kv_util:is_x_deleted(Merged) of - true -> - {tombstone, Merged}; - _ -> - {ok, Merged} - end - end. - -fail_reply(_R, _NumR, 0, NumNotFound, []) when NumNotFound > 0 -> - {error, notfound}; -fail_reply(R, NumR, _NumNotDeleted, _NumNotFound, _Fails) -> - {error, {r_val_unsatisfied, R, NumR}}. diff --git a/src/riak_kv_get_fsm.erl.orig b/src/riak_kv_get_fsm.erl.orig deleted file mode 100644 index d4912f9f84..0000000000 --- a/src/riak_kv_get_fsm.erl.orig +++ /dev/null @@ -1,680 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% riak_get_fsm: coordination of Riak GET requests -%% -%% Copyright (c) 2007-2010 Basho Technologies, Inc. All Rights Reserved. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- - --module(riak_kv_get_fsm). --behaviour(gen_fsm). --include_lib("riak_kv_vnode.hrl"). --ifdef(TEST). --include_lib("eunit/include/eunit.hrl"). --export([test_link/7, test_link/5]). --endif. --export([start/6, start_link/6, start_link/4]). --export([init/1, handle_event/3, handle_sync_event/4, - handle_info/3, terminate/3, code_change/4]). --export([prepare/2,validate/2,execute/2,waiting_vnode_r/2,waiting_read_repair/2]). - --type detail() :: timing | - vnodes. --type details() :: [detail()]. - --type option() :: {r, pos_integer()} | %% Minimum number of successful responses - {pr, non_neg_integer()} | %% Minimum number of primary vnodes participating - {basic_quorum, boolean()} | %% Whether to use basic quorum (return early - %% in some failure cases. - {notfound_ok, boolean()} | %% Count notfound reponses as successful. - {timeout, pos_integer() | infinity} | %% Timeout for vnode responses - {details, details()} | %% Return extra details as a 3rd element - {details, true} | - details. - --type options() :: [option()]. 
--type req_id() :: non_neg_integer(). - --export_type([options/0, option/0]). - - - --record(state, {from :: {raw, req_id(), pid()}, - options=[] :: options(), - n :: pos_integer(), - preflist2 :: riak_core_apl:preflist2(), - req_id :: non_neg_integer(), - starttime :: pos_integer(), - get_core :: riak_kv_get_core:getcore(), - timeout :: infinity | pos_integer(), - tref :: reference(), - bkey :: {riak_object:bucket(), riak_object:key()}, - bucket_props, - startnow :: {non_neg_integer(), non_neg_integer(), non_neg_integer()}, - get_usecs :: non_neg_integer(), - tracked_bucket=false :: boolean(), %% is per bucket stats enabled for this bucket - timing = [] :: [{atom(), erlang:timestamp()}], - calculated_timings :: {ResponseUSecs::non_neg_integer(), - [{StateName::atom(), TimeUSecs::non_neg_integer()}]} | undefined - }). - --include("riak_kv_dtrace.hrl"). - --define(DEFAULT_TIMEOUT, 60000). --define(DEFAULT_R, default). --define(DEFAULT_PR, 0). - -%% =================================================================== -%% Public API -%% =================================================================== - -%% In place only for backwards compatibility -start(ReqId,Bucket,Key,R,Timeout,From) -> - start_link({raw, ReqId, From}, Bucket, Key, [{r, R}, {timeout, Timeout}]). - -start_link(ReqId,Bucket,Key,R,Timeout,From) -> - start_link({raw, ReqId, From}, Bucket, Key, [{r, R}, {timeout, Timeout}]). - -%% @doc Start the get FSM - retrieve Bucket/Key with the options provided -%% -%% {r, pos_integer()} - Minimum number of successful responses -%% {pr, non_neg_integer()} - Minimum number of primary vnodes participating -%% {basic_quorum, boolean()} - Whether to use basic quorum (return early -%% in some failure cases. -%% {notfound_ok, boolean()} - Count notfound reponses as successful. -%% {timeout, pos_integer() | infinity} - Timeout for vnode responses --spec start_link({raw, req_id(), pid()}, binary(), binary(), options()) -> - {ok, pid()} | {error, any()}. -start_link(From, Bucket, Key, GetOptions) -> - gen_fsm:start_link(?MODULE, [From, Bucket, Key, GetOptions], []). - -%% =================================================================== -%% Test API -%% =================================================================== - --ifdef(TEST). -%% Create a get FSM for testing. StateProps must include -%% starttime - start time in gregorian seconds -%% n - N-value for request (is grabbed from bucket props in prepare) -%% bucket_props - bucket properties -%% preflist2 - [{{Idx,Node},primary|fallback}] preference list -%% -test_link(ReqId,Bucket,Key,R,Timeout,From,StateProps) -> - test_link({raw, ReqId, From}, Bucket, Key, [{r, R}, {timeout, Timeout}], StateProps). - -test_link(From, Bucket, Key, GetOptions, StateProps) -> - gen_fsm:start_link(?MODULE, {test, [From, Bucket, Key, GetOptions], StateProps}, []). - --endif. 
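As a hedged usage sketch (bucket, key, and option values are invented),
a client starts the FSM with a `{raw, ReqId, self()}' sender and then
waits for the reply tagged with its own request id:

    %% Illustrative only; option values follow the list documented above.
    ReqId = erlang:phash2(make_ref()),
    {ok, _Pid} = riak_kv_get_fsm:start_link({raw, ReqId, self()},
                                            <<"bucket">>, <<"key">>,
                                            [{r, 2}, {timeout, 60000}]),
    receive
        {ReqId, Reply} -> Reply   %% {ok, Obj} | {error, notfound} | ...
    after 61000 ->
        {error, req_timeout}
    end.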
- -%% ==================================================================== -%% gen_fsm callbacks -%% ==================================================================== - -%% @private -init([From, Bucket, Key, Options]) -> - StartNow = os:timestamp(), - StateData = add_timing(prepare, #state{from = From, - options = Options, - bkey = {Bucket, Key}, - startnow = StartNow}), -<<<<<<< HEAD -======= - riak_kv_get_put_monitor:get_fsm_spawned(self()), ->>>>>>> master - riak_core_dtrace:put_tag(io_lib:format("~p,~p", [Bucket, Key])), - ?DTRACE(?C_GET_FSM_INIT, [], ["init"]), - {ok, prepare, StateData, 0}; -init({test, Args, StateProps}) -> - %% Call normal init - {ok, prepare, StateData, 0} = init(Args), - - %% Then tweak the state record with entries provided by StateProps - Fields = record_info(fields, state), - FieldPos = lists:zip(Fields, lists:seq(2, length(Fields)+1)), - F = fun({Field, Value}, State0) -> - Pos = proplists:get_value(Field, FieldPos), - setelement(Pos, State0, Value) - end, - TestStateData = lists:foldl(F, StateData, StateProps), - - %% Enter into the execute state, skipping any code that relies on the - %% state of the rest of the system - {ok, validate, TestStateData, 0}. - -%% @private -prepare(timeout, StateData=#state{bkey=BKey={Bucket,_Key}}) -> - ?DTRACE(?C_GET_FSM_PREPARE, [], ["prepare"]), - {ok, Ring} = riak_core_ring_manager:get_my_ring(), - BucketProps = riak_core_bucket:get_bucket(Bucket, Ring), - DocIdx = riak_core_util:chash_key(BKey), - N = proplists:get_value(n_val,BucketProps), - StatTracked = proplists:get_value(stat_tracked, BucketProps, false), - UpNodes = riak_core_node_watcher:nodes(riak_kv), - Preflist2 = riak_core_apl:get_apl_ann(DocIdx, N, Ring, UpNodes), - new_state_timeout(validate, StateData#state{starttime=riak_core_util:moment(), - n = N, - bucket_props=BucketProps, - preflist2 = Preflist2, - tracked_bucket = StatTracked}). 
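For orientation, the annotated preflist computed in prepare/2 above has
the shape below (partition indexes and node names are invented, not
output from this patch); validate/2 later counts the primary/fallback
annotations:

    %% Shape of Preflist2 (riak_core_apl:get_apl_ann/4 result), N = 3.
    [{{182687704666362864775460604089535377456991567872, 'riak@n1'}, primary},
     {{365375409332725729550921208179070754913983135744, 'riak@n2'}, primary},
     {{548063113999088594326381812268606132370974703616, 'riak@n3'}, fallback}]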
- -%% @private -validate(timeout, StateData=#state{from = {raw, ReqId, _Pid}, options = Options, - n = N, bucket_props = BucketProps, preflist2 = PL2}) -> - ?DTRACE(?C_GET_FSM_VALIDATE, [], ["validate"]), -<<<<<<< HEAD - Timeout = get_option(timeout, Options, ?DEFAULT_TIMEOUT), -======= - AppEnvTimeout = app_helper:get_env(riak_kv, timeout), - Timeout = case AppEnvTimeout of - undefined -> get_option(timeout, Options, ?DEFAULT_TIMEOUT); - _ -> AppEnvTimeout - end, ->>>>>>> master - R0 = get_option(r, Options, ?DEFAULT_R), - PR0 = get_option(pr, Options, ?DEFAULT_PR), - R = riak_kv_util:expand_rw_value(r, R0, BucketProps, N), - PR = riak_kv_util:expand_rw_value(pr, PR0, BucketProps, N), - NumVnodes = length(PL2), - NumPrimaries = length([x || {_,primary} <- PL2]), - case validate_quorum(R, R0, N, PR, PR0, NumPrimaries, NumVnodes) of - ok -> - BQ0 = get_option(basic_quorum, Options, default), - FailThreshold = - case riak_kv_util:expand_value(basic_quorum, BQ0, BucketProps) of - true -> - erlang:min((N div 2)+1, % basic quorum, or - (N-R+1)); % cannot ever get R 'ok' replies - _ElseFalse -> - N - R + 1 % cannot ever get R 'ok' replies - end, - AllowMult = proplists:get_value(allow_mult,BucketProps), - NFOk0 = get_option(notfound_ok, Options, default), - NotFoundOk = riak_kv_util:expand_value(notfound_ok, NFOk0, BucketProps), - DeletedVClock = get_option(deletedvclock, Options, false), - GetCore = riak_kv_get_core:init(N, R, FailThreshold, - NotFoundOk, AllowMult, - DeletedVClock), - new_state_timeout(execute, StateData#state{get_core = GetCore, - timeout = Timeout, - req_id = ReqId}); - Error -> - StateData2 = client_reply(Error, StateData), - {stop, normal, StateData2} - end. - -%% @private validate the quorum values -%% {error, Message} or ok -validate_quorum(R, ROpt, _N, _PR, _PROpt, _NumPrimaries, _NumVnodes) when R =:= error -> - {error, {r_val_violation, ROpt}}; -validate_quorum(R, _ROpt, N, _PR, _PROpt, _NumPrimaries, _NumVnodes) when R > N -> - {error, {n_val_violation, N}}; -validate_quorum(_R, _ROpt, _N, PR, PROpt, _NumPrimaries, _NumVnodes) when PR =:= error -> - {error, {pr_val_violation, PROpt}}; -validate_quorum(_R, _ROpt, N, PR, _PROpt, _NumPrimaries, _NumVnodes) when PR > N -> - {error, {n_val_violation, N}}; -validate_quorum(_R, _ROpt, _N, PR, _PROpt, NumPrimaries, _NumVnodes) when PR > NumPrimaries -> - {error, {pr_val_unsatisfied, PR, NumPrimaries}}; -validate_quorum(R, _ROpt, _N, _PR, _PROpt, _NumPrimaries, NumVnodes) when R > NumVnodes -> - {error, {insufficient_vnodes, NumVnodes, need, R}}; -validate_quorum(_R, _ROpt, _N, _PR, _PROpt, _NumPrimaries, _NumVnodes) -> - ok. - -%% @private -execute(timeout, StateData0=#state{timeout=Timeout,req_id=ReqId, - bkey=BKey, - preflist2 = Preflist2}) -> - ?DTRACE(?C_GET_FSM_EXECUTE, [], ["execute"]), - TRef = schedule_timeout(Timeout), - Preflist = [IndexNode || {IndexNode, _Type} <- Preflist2], - Ps = preflist_for_tracing(Preflist), - ?DTRACE(?C_GET_FSM_PREFLIST, [], Ps), - riak_kv_vnode:get(Preflist, BKey, ReqId), - StateData = StateData0#state{tref=TRef}, - new_state(waiting_vnode_r, StateData). - -%% @private calculate a concatenated preflist for tracing macro -preflist_for_tracing(Preflist) -> - %% TODO: We can see entire preflist (more than 4 nodes) if we concatenate - %% all info into a single string. - [if is_atom(Nd) -> - [atom2list(Nd), $,, integer_to_list(Idx)]; - true -> - <<>> % eunit test - end || {Idx, Nd} <- lists:sublist(Preflist, 4)]. 
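Plugging numbers into the fail-threshold arithmetic in validate/2 above
(a worked check, not new behavior):

    %% N = 3, R = 2: both branches agree.
    2 = erlang:min((3 div 2) + 1, 3 - 2 + 1),  %% basic_quorum = true
    2 = 3 - 2 + 1,                             %% basic_quorum = false
    %% N = 3, R = 1: basic quorum gives up after 2 misses, plain mode after 3.
    2 = erlang:min((3 div 2) + 1, 3 - 1 + 1),
    3 = 3 - 1 + 1.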
- -%% @private -waiting_vnode_r({r, VnodeResult, Idx, _ReqId}, StateData = #state{get_core = GetCore}) -> - ShortCode = riak_kv_get_core:result_shortcode(VnodeResult), - IdxStr = integer_to_list(Idx), - ?DTRACE(?C_GET_FSM_WAITING_R, [ShortCode], ["waiting_vnode_r", IdxStr]), - UpdGetCore = riak_kv_get_core:add_result(Idx, VnodeResult, GetCore), - case riak_kv_get_core:enough(UpdGetCore) of - true -> - {Reply, UpdGetCore2} = riak_kv_get_core:response(UpdGetCore), - NewStateData = client_reply(Reply, StateData#state{get_core = UpdGetCore2}), - update_stats(Reply, NewStateData), - maybe_finalize(NewStateData); - false -> - %% don't use new_state/2 since we do timing per state, not per message in state - {next_state, waiting_vnode_r, StateData#state{get_core = UpdGetCore}} - end; -waiting_vnode_r(request_timeout, StateData) -> - ?DTRACE(?C_GET_FSM_WAITING_R_TIMEOUT, [-2], ["waiting_vnode_r", "timeout"]), - S2 = client_reply({error,timeout}, StateData), - update_stats(timeout, S2), - finalize(S2). - -%% @private -waiting_read_repair({r, VnodeResult, Idx, _ReqId}, - StateData = #state{get_core = GetCore}) -> - ShortCode = riak_kv_get_core:result_shortcode(VnodeResult), - IdxStr = integer_to_list(Idx), - ?DTRACE(?C_GET_FSM_WAITING_RR, [ShortCode], - ["waiting_read_repair", IdxStr]), - UpdGetCore = riak_kv_get_core:add_result(Idx, VnodeResult, GetCore), - maybe_finalize(StateData#state{get_core = UpdGetCore}); -waiting_read_repair(request_timeout, StateData) -> - ?DTRACE(?C_GET_FSM_WAITING_RR_TIMEOUT, [-2], - ["waiting_read_repair", "timeout"]), - finalize(StateData). - -%% @private -handle_event(_Event, _StateName, StateData) -> - {stop,badmsg,StateData}. - -%% @private -handle_sync_event(_Event, _From, _StateName, StateData) -> - {stop,badmsg,StateData}. - -%% @private -handle_info(request_timeout, StateName, StateData) -> - ?MODULE:StateName(request_timeout, StateData); -%% @private -handle_info(_Info, _StateName, StateData) -> - {stop,badmsg,StateData}. - -%% @private -terminate(Reason, _StateName, _State) -> - Reason. - -%% @private -code_change(_OldVsn, StateName, State, _Extra) -> {ok, StateName, State}. - - -%% ==================================================================== -%% Internal functions -%% ==================================================================== - -%% Move to the new state, marking the time it started -new_state(StateName, StateData) -> - {next_state, StateName, add_timing(StateName, StateData)}. - -%% Move to the new state, marking the time it started and trigger an immediate -%% timeout. -new_state_timeout(StateName, StateData) -> - {next_state, StateName, add_timing(StateName, StateData), 0}. - -maybe_finalize(StateData=#state{get_core = GetCore}) -> - case riak_kv_get_core:has_all_results(GetCore) of - true -> finalize(StateData); - false -> {next_state,waiting_read_repair,StateData} - end. - -finalize(StateData=#state{get_core = GetCore}) -> - {Action, UpdGetCore} = riak_kv_get_core:final_action(GetCore), - UpdStateData = StateData#state{get_core = UpdGetCore}, - case Action of - delete -> - maybe_delete(UpdStateData); - {read_repair, Indices, RepairObj} -> - maybe_read_repair(Indices, RepairObj, UpdStateData); - _Nop -> - ?DTRACE(?C_GET_FSM_FINALIZE, [], ["finalize"]), - ok - end, - {stop,normal,StateData}. - -%% Maybe issue deletes if all primary nodes are available. -%% Get core will only requestion deletion if all vnodes -%% replies with the same value. 
-maybe_delete(_StateData=#state{n = N, preflist2=Sent, - req_id=ReqId, bkey=BKey}) -> - %% Check sent to a perfect preflist and we can delete - IdealNodes = [{I, Node} || {{I, Node}, primary} <- Sent], - case length(IdealNodes) == N of - true -> - ?DTRACE(?C_GET_FSM_MAYBE_DELETE, [1], - ["maybe_delete", "triggered"]), - riak_kv_vnode:del(IdealNodes, BKey, ReqId); - _ -> - ?DTRACE(?C_GET_FSM_MAYBE_DELETE, [0], - ["maybe_delete", "nop"]), - nop - end. - -%% based on what the get_put_monitor stats say, and a random roll, potentially -%% skip read-repriar -%% On a very busy system with many writes and many reads, it is possible to -%% get overloaded by read-repairs. By occasionally skipping read_repair we -%% can keep the load more managable; ie the only load on the system becomes -%% the gets, puts, etc. -maybe_read_repair(Indices, RepairObj, UpdStateData) -> - HardCap = app_helper:get_env(riak_kv, read_repair_max), - SoftCap = app_helper:get_env(riak_kv, read_repair_soft, HardCap), - Dorr = determine_do_read_repair(SoftCap, HardCap), - if - Dorr -> - read_repair(Indices, RepairObj, UpdStateData); - true -> - riak_kv_stat:update(skipped_read_repairs), - skipping - end. - -determine_do_read_repair(_SoftCap, HardCap) when HardCap == undefined -> - true; -determine_do_read_repair(SoftCap, HardCap) -> - Actual = riak_kv_get_put_monitor:gets_active(), - determine_do_read_repair(SoftCap, HardCap, Actual). - -determine_do_read_repair(undefined, HardCap, Actual) -> - determine_do_read_repair(HardCap, HardCap, Actual); -determine_do_read_repair(_SoftCap, HardCap, Actual) when HardCap =< Actual -> - false; -determine_do_read_repair(SoftCap, _HardCap, Actual) when Actual =< SoftCap -> - true; -determine_do_read_repair(SoftCap, HardCap, Actual) -> - Roll = roll_d100(), - determine_do_read_repair(SoftCap, HardCap, Actual, Roll). - -determine_do_read_repair(SoftCap, HardCap, Actual, Roll) -> - AdjustedActual = Actual - SoftCap, - AdjustedHard = HardCap - SoftCap, - Threshold = AdjustedActual / AdjustedHard * 100, - Threshold =< Roll. - --ifdef(TEST). -roll_d100() -> - fsm_eqc_util:get_fake_rng(get_fsm_qc). --else. -% technically not a d100 as it has a 0 -roll_d100() -> - crypto:rand_uniform(0, 100). --endif. - -%% Issue read repairs for any vnodes that are out of date -read_repair(Indices, RepairObj, - #state{req_id = ReqId, starttime = StartTime, - preflist2 = Sent, bkey = BKey, bucket_props = BucketProps}) -> - RepairPreflist = [{Idx, Node} || {{Idx, Node}, _Type} <- Sent, - proplists:get_value(Idx, Indices) /= undefined], - Ps = preflist_for_tracing(RepairPreflist), - ?DTRACE(?C_GET_FSM_RR, [], Ps), - riak_kv_vnode:readrepair(RepairPreflist, BKey, RepairObj, ReqId, - StartTime, [{returnbody, false}, - {bucket_props, BucketProps}]), - riak_kv_stat:update({read_repairs, Indices, Sent}). - - -get_option(Name, Options, Default) -> - proplists:get_value(Name, Options, Default). - -schedule_timeout(infinity) -> - undefined; -schedule_timeout(Timeout) -> - erlang:send_after(Timeout, self(), request_timeout). - -client_reply(Reply, StateData0 = #state{from = {raw, ReqId, Pid}, - options = Options}) -> - StateData = add_timing(reply, StateData0), - Msg = case proplists:get_value(details, Options, false) of - false -> - {ReqId, Reply}; - [] -> - {ReqId, Reply}; - Details -> - {OkError, ObjReason} = Reply, - Info = client_info(Details, StateData, []), - {ReqId, {OkError, ObjReason, Info}} - end, - Pid ! 
Msg, - ShortCode = riak_kv_get_core:result_shortcode(Reply), - %% calculate timings here, since the trace macro needs total response time - %% Stuff the result in state so we don't need to calculate it again - {ResponseUSecs, Stages} = riak_kv_fsm_timing:calc_timing(StateData#state.timing), - ?DTRACE(?C_GET_FSM_CLIENT_REPLY, [ShortCode, ResponseUSecs], ["client_reply"]), - StateData#state{calculated_timings={ResponseUSecs, Stages}}. - -update_stats({ok, Obj}, #state{tracked_bucket = StatTracked, calculated_timings={ResponseUSecs, Stages}}) -> - %% Stat the number of siblings and the object size, and timings - NumSiblings = riak_object:value_count(Obj), - Bucket = riak_object:bucket(Obj), - ObjSize = calculate_objsize(Bucket, Obj), - riak_kv_stat:update({get_fsm, Bucket, ResponseUSecs, Stages, NumSiblings, ObjSize, StatTracked}); -update_stats(_, #state{ bkey = {Bucket, _}, tracked_bucket = StatTracked, calculated_timings={ResponseUSecs, Stages}}) -> - riak_kv_stat:update({get_fsm, Bucket, ResponseUSecs, Stages, undefined, undefined, StatTracked}). - -%% Get an approximation of object size by adding together the bucket, key, -%% vectorclock, and all of the siblings. This is more complex than -%% calling term_to_binary/1, but it should be easier on memory, -%% especially for objects with large values. -calculate_objsize(Bucket, Obj) -> - Contents = riak_object:get_contents(Obj), - size(Bucket) + - size(riak_object:key(Obj)) + - size(term_to_binary(riak_object:vclock(Obj))) + - lists:sum([size(term_to_binary(MD)) + value_size(Value) || {MD, Value} <- Contents]). - -value_size(Value) when is_binary(Value) -> size(Value); -value_size(Value) -> size(term_to_binary(Value)). - -client_info(true, StateData, Acc) -> - client_info(details(), StateData, Acc); -client_info([], _StateData, Acc) -> - Acc; -client_info([timing | Rest], StateData = #state{timing=Timing}, Acc) -> - {ResponseUsecs, Stages} = riak_kv_fsm_timing:calc_timing(Timing), - client_info(Rest, StateData, [{response_usecs, ResponseUsecs}, - {stages, Stages} | Acc]); -client_info([vnodes | Rest], StateData = #state{get_core = GetCore}, Acc) -> - Info = riak_kv_get_core:info(GetCore), - client_info(Rest, StateData, Info ++ Acc); -client_info([Unknown | Rest], StateData, Acc) -> - client_info(Rest, StateData, [{Unknown, unknown_detail} | Acc]). - -%% Add timing information to the state -add_timing(Stage, State = #state{timing = Timing}) -> - State#state{timing = riak_kv_fsm_timing:add_timing(Stage, Timing)}. - -details() -> - [timing, - vnodes]. - -atom2list(A) when is_atom(A) -> - atom_to_list(A); -atom2list(P) when is_pid(P)-> - pid_to_list(P). % eunit tests - --ifdef(TEST). --define(expect_msg(Exp,Timeout), - ?assertEqual(Exp, receive Exp -> Exp after Timeout -> timeout end)). - -%% SLF: Comment these test cases because of OTP app dependency -%% changes: riak_kv_vnode:test_vnode/1 now relies on riak_core to -%% be running ... eventually there's a call to -%% riak_core_ring_manager:get_raw_ring(). 
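For a concrete pass through determine_do_read_repair/4 above (cap and
load values are invented; roll_d100/0 as defined above yields 0..99):
with soft cap 5000, hard cap 15000, and 10000 gets in flight, the
threshold is (10000-5000)/(15000-5000)*100 = 50.0, so a roll of 50 or
higher performs the repair and lower rolls skip it; the skip
probability rises linearly from 0% at the soft cap to 100% at the hard
cap.

    SoftCap = 5000, HardCap = 15000, Actual = 10000,
    Threshold = (Actual - SoftCap) / (HardCap - SoftCap) * 100,  %% 50.0
    true  = (Threshold =< 70),  %% roll of 70: do the read repair
    false = (Threshold =< 30).  %% roll of 30: skip it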
- -determine_do_read_repair_test_() -> - [ - {"soft cap is undefined, actual below", ?_assert(determine_do_read_repair(undefined, 7, 5))}, - {"soft cap is undefined, actual above", ?_assertNot(determine_do_read_repair(undefined, 7, 10))}, - {"soft cap is undefined, actual at", ?_assertNot(determine_do_read_repair(undefined, 7, 7))}, - {"hard cap is undefiend", ?_assert(determine_do_read_repair(3000, undefined))}, - {"actual below soft cap", ?_assert(determine_do_read_repair(3000, 7000, 2000))}, - {"actual equals soft cap", ?_assert(determine_do_read_repair(3000, 7000, 3000))}, - {"actual above hard cap", ?_assertNot(determine_do_read_repair(3000, 7000, 9000))}, - {"actaul equals hard cap", ?_assertNot(determine_do_read_repair(3000, 7000, 7000))}, - {"hard cap == soft cap, actual below", ?_assert(determine_do_read_repair(100, 100, 50))}, - {"hard cap == soft cap, actual above", ?_assertNot(determine_do_read_repair(100, 100, 150))}, - {"hard cap == soft cap, actual equals", ?_assertNot(determine_do_read_repair(100, 100, 100))}, - {"roll below threshold", ?_assertNot(determine_do_read_repair(5000, 15000, 10000, 1))}, - {"roll exactly threshold", ?_assert(determine_do_read_repair(5000, 15000, 10000, 50))}, - {"roll above threshold", ?_assert(determine_do_read_repair(5000, 15000, 10000, 70))} - ]. - --ifdef(BROKEN_EUNIT_PURITY_VIOLATION). -get_fsm_test_() -> - {spawn, [{ setup, - fun setup/0, - fun cleanup/1, - [ - fun happy_path_case/0, - fun n_val_violation_case/0 - ] - }]}. - -setup() -> - %% Set infinity timeout for the vnode inactivity timer so it does not - %% try to handoff. - application:load(riak_core), - application:set_env(riak_core, vnode_inactivity_timeout, infinity), - application:load(riak_kv), - application:set_env(riak_kv, storage_backend, riak_kv_memory_backend), - application:set_env(riak_core, default_bucket_props, [{r, quorum}, - {w, quorum}, {pr, 0}, {pw, 0}, {rw, quorum}, {n_val, 3}, - {basic_quorum, true}, {notfound_ok, false}]), - - %% Have tracer on hand to grab any traces we want - riak_core_tracer:start_link(), - riak_core_tracer:reset(), - riak_core_tracer:filter([{riak_kv_vnode, readrepair}], - fun({trace, _Pid, call, - {riak_kv_vnode, readrepair, - [Preflist, _BKey, Obj, ReqId, _StartTime, _Options]}}) -> - [{rr, Preflist, Obj, ReqId}] - end), - ok. - -cleanup(_) -> - dbg:stop_clear(). - -happy_path_case() -> - riak_core_tracer:collect(5000), - - %% Start 3 vnodes - Indices = [1, 2, 3], - Preflist2 = [begin - {ok, Pid} = riak_kv_vnode:test_vnode(Idx), - {{Idx, Pid}, primary} - end || Idx <- Indices], - Preflist = [IdxPid || {IdxPid,_Type} <- Preflist2], - - %% Decide on some parameters - Bucket = <<"mybucket">>, - Key = <<"mykey">>, - Nval = 3, - BucketProps = bucket_props(Bucket, Nval), - - %% Start the FSM to issue a get and check notfound - - ReqId1 = 112381838, % erlang:phash2(erlang:now()). 
- R = 2, - Timeout = 1000, - {ok, _FsmPid1} = test_link(ReqId1, Bucket, Key, R, Timeout, self(), - [{starttime, 63465712389}, - {n, Nval}, - {bucket_props, BucketProps}, - {preflist2, Preflist2}]), - ?assertEqual({error, notfound}, wait_for_reqid(ReqId1, Timeout + 1000)), - - %% Update the first two vnodes with a value - ReqId2 = 49906465, - Value = <<"value">>, - Obj1 = riak_object:new(Bucket, Key, Value), - riak_kv_vnode:put(lists:sublist(Preflist, 2), {Bucket, Key}, Obj1, ReqId2, - 63465715958, [{bucket_props, BucketProps}], {raw, ReqId2, self()}), - ?expect_msg({ReqId2, {w, 1, ReqId2}}, Timeout + 1000), - ?expect_msg({ReqId2, {w, 2, ReqId2}}, Timeout + 1000), - ?expect_msg({ReqId2, {dw, 1, ReqId2}}, Timeout + 1000), - ?expect_msg({ReqId2, {dw, 2, ReqId2}}, Timeout + 1000), - - %% Issue a get, check value returned. - ReqId3 = 30031523, - {ok, _FsmPid2} = test_link(ReqId3, Bucket, Key, R, Timeout, self(), - [{starttime, 63465712389}, - {n, Nval}, - {bucket_props, BucketProps}, - {preflist2, Preflist2}]), - ?assertEqual({ok, Obj1}, wait_for_reqid(ReqId3, Timeout + 1000)), - - %% Check readrepair issued to third node - ExpRRPrefList = lists:sublist(Preflist, 3, 1), - riak_kv_test_util:wait_for_pid(_FsmPid2), - riak_core_tracer:stop_collect(), - ?assertEqual([{0, {rr, ExpRRPrefList, Obj1, ReqId3}}], - riak_core_tracer:results()). - - -n_val_violation_case() -> - ReqId1 = 13210434, % erlang:phash2(erlang:now()). - Bucket = <<"mybucket">>, - Key = <<"badnvalkey">>, - Nval = 3, - R = 5, - Timeout = 1000, - BucketProps = bucket_props(Bucket, Nval), - %% Fake three nodes - Indices = [1, 2, 3], - Preflist2 = [begin - {{Idx, self()}, primary} - end || Idx <- Indices], - {ok, _FsmPid1} = test_link(ReqId1, Bucket, Key, R, Timeout, self(), - [{starttime, 63465712389}, - {n, Nval}, - {bucket_props, BucketProps}, - {preflist2, Preflist2}]), - ?assertEqual({error, {n_val_violation, 3}}, wait_for_reqid(ReqId1, Timeout + 1000)). - - -wait_for_reqid(ReqId, Timeout) -> - receive - {ReqId, Msg} -> Msg - after Timeout -> - {error, req_timeout} - end. - -bucket_props(Bucket, Nval) -> % riak_core_bucket:get_bucket(Bucket). - [{name, Bucket}, - {allow_mult,false}, - {big_vclock,50}, - {chash_keyfun,{riak_core_util,chash_std_keyfun}}, - {dw,quorum}, - {last_write_wins,false}, - {linkfun,{modfun,riak_kv_wm_link_walker,mapreduce_linkfun}}, - {n_val,Nval}, - {old_vclock,86400}, - {postcommit,[]}, - {precommit,[]}, - {r,quorum}, - {rw,quorum}, - {small_vclock,50}, - {w,quorum}, - {young_vclock,20}]. - - --endif. % BROKEN_EUNIT_PURITY_VIOLATION --endif. diff --git a/src/riak_kv_mrc_pipe.erl.orig b/src/riak_kv_mrc_pipe.erl.orig deleted file mode 100644 index bae630a3a3..0000000000 --- a/src/riak_kv_mrc_pipe.erl.orig +++ /dev/null @@ -1,1062 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2011 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. 
-%%
-%% -------------------------------------------------------------------
-
-%% @doc Riak KV MapReduce / Riak Pipe Compatibility
-%%
-%% == About using a `{modfun, Mod, Fun, Arg}' generator in a MapReduce job ==
-%%
-%% The six methods of specifying input for a MapReduce job are:
-%%
-%% <ol>
-%% <li> Specify a bucket name (to emit all bucket/key pairs for that
-%%      bucket) </li>
-%% <li> Specify a bucket name and keyfilter spec, `{Bucket,
-%%      KeyFilter}' </li>
-%% <li> Specify an explicit list of bucket/key pairs </li>
-%% <li> Specify `{index, Bucket, Index, Key}' or `{index, Bucket,
-%%      Index, StartKey, EndKey}' to query secondary indexes and send
-%%      matching keys into the MR as inputs. </li>
-%% <li> Specify `{search, Bucket, Query}' or `{search, Bucket, Query,
-%%      Filter}' to query Riak Search and send matching keys into the
-%%      MR as inputs. </li>
-%% <li> Specify `{modfun, Mod, Fun, Arg}' to generate the raw input
-%%      data for the rest of the workflow </li>
-%% </ol>
-%%
-%% For the final method, "raw input data" means that the output of the
-%% function will be used as-is by the next item in the MapReduce
-%% workflow.  If that next item is a map phase, then that item's input
-%% is expected to be a bucket/key pair.  If the next item is a reduce
-%% phase, then the input can be an arbitrary term.
-%%
-%% The type specification for a `{modfun, Mod, Fun, Arg}' generator
-%% function is:
-%% ```
-%% -spec generator_func(Pipe::riak_pipe:pipe(),
-%%                      Arg::term(),
-%%                      Timeout::integer() | 'infinity').
-%% '''
-%%
-%% This generator function is responsible for using {@link
-%% riak_pipe:queue_work/2} to send any data to the pipe, and it is
-%% responsible for calling {@link riak_pipe:eoi/2} to signal the end
-%% of input.
-%%
-%% == About reduce phase compatibility ==
-%%
-%% An Erlang reduce phase is defined by the tuple:
-%% `{reduce, Fun::function(2), Arg::term(), Keep::boolean()}'.
-%%
-%% <ul>
-%% <li> `Fun' takes the form of `Fun(InputList, Arg)' where `Arg' is
-%%      the argument specified in the definition 4-tuple above.
-%%
-%%      NOTE: Unlike a fold function (e.g., `lists:foldl/3'), the
-%%      `Arg' argument is constant for each iteration of the reduce
-%%      function. </li>
-%% <li> The `Arg' may be any term, as the caller sees fit. However, if
-%%      the caller wishes to have more control over the reduce phase,
-%%      then `Arg' must be a property list. The control knobs that may
-%%      be specified are:
-%%      <ul>
-%%      <li> `reduce_phase_only_1' will buffer all inputs to the reduce
-%%           phase fitting and only call the reduce function once.
-%%
-%%           NOTE: Use with caution to avoid excessive memory use. </li>
-%%      <li> `{reduce_phase_batch_size, Max::integer()}' will buffer all
-%%           inputs to the reduce phase fitting and call the reduce
-%%           function after `Max' items have been buffered (see the
-%%           usage sketch just after this list). </li>
-%%      </ul>
-%%      If neither `reduce_phase_only_1' nor
-%%      `{reduce_phase_batch_size, Max}' is present, then the batching
-%%      size will default to the value of the application environment
-%%      variable `mapred_reduce_phase_batch_size' in the `riak_kv'
-%%      application.
-%%
-%%      NOTE: This mixing of user argument data and MapReduce
-%%      implementation metadata is suboptimal, but to separate the two
-%%      types of data would require a change that is incompatible with
-%%      the current Erlang MapReduce input specification, e.g., a
-%%      5-tuple such as `{reduce, Fun, Arg, Keep, MetaData}' or else a
-%%      custom wrapper around the 3rd arg, e.g. `{reduce, Fun,
-%%      {magic_tag, Arg, Metadata}, Keep}'. </li>
-%% <li> If `Keep' is `true', then the output of this phase will be
-%%      returned to the caller (i.e. the output will be "kept"). </li>
-%% </ul>
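As a usage sketch for the batching knob above (bucket and batch size
are invented; riak_kv_mapreduce does ship map_object_value and
reduce_sum), the knob simply rides along in the reduce phase's Arg
proplist:

    Query = [{map, {modfun, riak_kv_mapreduce, map_object_value}, none, false},
             {reduce, {modfun, riak_kv_mapreduce, reduce_sum},
              [{reduce_phase_batch_size, 100}], true}],
    {ok, Results} = riak_kv_mrc_pipe:mapred(<<"mybucket">>, Query).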
- --module(riak_kv_mrc_pipe). - -%% TODO: Stolen from old-style MapReduce interface, but is 60s a good idea? --define(DEFAULT_TIMEOUT, 60000). - --define(SINK_SYNC_PERIOD_DEFAULT, 10). - --export([ - mapred/2, - mapred/3, - mapred_stream/1, - mapred_stream/2, - send_inputs/2, - send_inputs/3, - send_inputs_async/2, - send_inputs_async/3, - collect_outputs/2, - collect_outputs/3, - group_outputs/2, - mapred_stream_sink/3, - collect_sink/1, - receive_sink/1, - destroy_sink/1, - cleanup_sink/1, - error_exists/1, - mapred_plan/1, - mapred_plan/2, - compile_string/1, - compat_fun/1, - sink_sync_period/0 - ]). -%% NOTE: Example functions are used by EUnit tests --export([example/0, example_bucket/0, example_reduce/0, - example_setup/0, example_setup/1]). - --include_lib("riak_pipe/include/riak_pipe.hrl"). --include_lib("riak_pipe/include/riak_pipe_log.hrl"). --include("riak_kv_mrc_sink.hrl"). - --export_type([map_query_fun/0, - reduce_query_fun/0, - key_input/0, - link_match/0]). - -%% All of the types of Input allowed for a MapReduce --type input() :: [key_input()] - | bucket_input() - | index_input() - | search_input() - | modfun_input(). --type key_input() :: riak_kv_pipe_get:input(). --type bucket_input() :: binary() - | {Bucket :: binary(), KeyFilter :: [keyfilter()]}. --type keyfilter() :: [string()]. --type index_input() :: {index, Bucket :: binary(), Index :: binary(), - Key :: term()} - | {index, Bucket :: binary(), Index :: binary(), - Start :: term(), End :: term()}. --type search_input() :: {search, Bucket :: binary(), Query :: binary()} - | {search, Bucket :: binary(), Query :: binary(), - Filter :: [keyfilter()]}. --type modfun_input() :: {modfun, Module :: atom(), Function :: atom(), - Arg :: term()}. - -%% All of the Query syntax allowed --type query_part() :: {map, map_query_fun(), - Arg :: term(), Keep :: boolean()} - | {reduce, reduce_query_fun(), - Arg :: term(), Keep :: boolean()} - | {link, - BucketMatch :: link_match(), - TagMatch :: link_match(), - Keep :: boolean()}. --type map_query_fun() :: - {qfun, fun( (Input :: term(), - KeyData :: term(), - PhaseArg :: term()) -> [term()] )} - | query_fun(). --type reduce_query_fun() :: - {qfun, fun( (Input :: [term()], - PhaseArg :: term()) -> [term()] )} - | query_fun(). --type query_fun() :: - {modfun, Module :: atom(), Function :: atom()} - | {strfun, {Bucket :: binary(), Key :: binary()}} - | {strfun, Source :: string()|binary()} - | {jsanon, {Bucket :: binary(), Key :: binary()}} - | {jsfun, Name :: binary()} - | {jsanon, Source :: binary()}. --type link_match() :: binary() | '_'. - -%% The output of collect_outputs/2,3, group_outputs/2, and collect_sink/1 --type ungrouped_results() :: [{From :: non_neg_integer(), Result :: term()}]. --type grouped_results() :: [Results :: list()] - | list(). - -%% The error reasons returned from collect_sink/1 --type receive_sink_error() :: {sender_died, Reason::term()} - | {sink_died, Reason::term()} - | timeout - | {From::non_neg_integer(), Info::term()}. - -%% @equiv mapred(Inputs, Query, 60000) -mapred(Inputs, Query) -> - mapred(Inputs, Query, ?DEFAULT_TIMEOUT). - -%% @doc Perform a MapReduce `Query' over `Inputs' and return the -%% result. `Timeout' here is the maximum time to wait between the -%% delivery of each output, not an overall timeout. --spec mapred(input(), [query_part()], timeout()) -> - {ok, grouped_results()} | {error, Reason :: term()} - |{error, Reason :: term(), - {ok, grouped_results()} | {error, Reason :: term()}}. 
-mapred(Inputs, Query, Timeout) -> - case mapred_stream_sink(Inputs, Query, Timeout) of - {ok, Ctx} -> - case collect_sink(Ctx) of - {ok, _}=Success -> - cleanup_sink(Ctx), - Success; - {error, _}=Error -> - destroy_sink(Ctx), - Error - end; - Error -> - {error, Error} - end. - -%% @equiv mapred_stream(Query, []) --spec mapred_stream([query_part()]) -> - {{ok, riak_pipe:pipe()}, NumKeeps :: integer()}. -mapred_stream(Query) -> - mapred_stream(Query, []). - -%% @doc Setup the MapReduce plumbing, preparted to receive inputs. -%% The caller should then use {@link send_inputs/2} or {@link -%% send_inputs/3} to give the query inputs to process. -%% -%% The second element of the return tuple is the number of phases that -%% requested to keep their inputs, and will need to be passed to -%% {@link collect_outputs/3} or {@link group_outputs/2} to get labels -%% compatible with HTTP and PB interface results. --spec mapred_stream([query_part()], list()) -> - {{ok, riak_pipe:pipe()}, NumKeeps :: integer()}. -mapred_stream(Query, Options) when is_list(Options) -> - NumKeeps = count_keeps_in_query(Query), - {riak_pipe:exec(mr2pipe_phases(Query), - [{log, sink},{trace,[error]}]++Options), - NumKeeps}. - -%% @doc Setup the MapReduce plumbing, including separate process to -%% receive output (the sink) and send input (the async sender), and a -%% delayed `pipe_timeout' message. This call returns a context record -%% containing details for each piece. Monitors are setup in the -%% process that calls this function, watching the sink and sender. -%% -%% See {@link receive_sink/1} for details about how to use this -%% context. --spec mapred_stream_sink(input(), [query_part()], timeout()) -> - {ok, #mrc_ctx{}} | {error, term()}. -mapred_stream_sink(Inputs, Query, Timeout) -> - {ok, Sink} = riak_kv_mrc_sink:start(self(), []), - Options = [{sink, #fitting{pid=Sink}}, - {sink_type, {fsm, sink_sync_period(), infinity}}], - try mapred_stream(Query, Options) of - {{ok, Pipe}, NumKeeps} -> - %% catch just in case the pipe or sink has already died - %% for any reason - we'll get a DOWN from the monitor later - catch riak_kv_mrc_sink:use_pipe(Sink, Pipe), - SinkMon = erlang:monitor(process, Sink), - PipeRef = (Pipe#pipe.sink)#fitting.ref, - Timer = erlang:send_after(Timeout, self(), - {pipe_timeout, PipeRef}), - {Sender, SenderMon} = - riak_kv_mrc_pipe:send_inputs_async(Pipe, Inputs), - {ok, #mrc_ctx{ref=PipeRef, - pipe=Pipe, - sink={Sink,SinkMon}, - sender={Sender,SenderMon}, - timer={Timer,PipeRef}, - keeps=NumKeeps}} - catch throw:{badard, Fitting, Reason} -> - riak_kv_mrc_sink:stop(Sink), - {error, {Fitting, Reason}} - end. - - -%% The plan functions are useful for seeing equivalent (we hope) pipeline. - -%% @doc Produce the pipe spec that will implement the given MapReduce -%% query. Intended for debugging only. --spec mapred_plan([query_part()]) -> [ riak_pipe:fitting_spec() ]. -mapred_plan(Query) -> - mr2pipe_phases(Query). - -%% @doc Produce the pipe spec that will implement the given MapReduce -%% query, and prepend a tuple of the form `{bkeys, [key_input()]}'. -%% If `BucketOrList' is a binary bucket name, this function will list -%% the keys in the bucket to return in this tuple. Intended -%% for debugging only. --spec mapred_plan([key_input()]|binary(), [query_part()]) -> - [{bkeys, [key_input()]} | riak_pipe:fitting_spec() ]. 
-mapred_plan(BucketOrList, Query) -> - BKeys = if is_list(BucketOrList) -> - BucketOrList; - is_binary(BucketOrList) -> - {ok, C} = riak:local_client(), - {ok, Keys} = C:list_keys(BucketOrList), - [{BucketOrList, Key} || Key <- Keys] - end, - [{bkeys, BKeys}|mapred_plan(Query)]. - -%% @doc Convert a MapReduce query into a list of Pipe fitting specs. --spec mr2pipe_phases([query_part()]) -> [ riak_pipe:fitting_spec() ]. -mr2pipe_phases([]) -> - [#fitting_spec{name=0, - module=riak_pipe_w_pass, - chashfun=follow}]; -mr2pipe_phases(Query) -> - %% now() is used as a random hash to choose which vnode to collect - %% the reduce inputs - Now = now(), - - %% first convert phase - QueryT = list_to_tuple(Query), - Numbered = lists:zip(Query, lists:seq(0, length(Query)-1)), - Fittings0 = lists:flatten([mr2pipe_phase(P,I,Now,QueryT) || - {P,I} <- Numbered]), - - %% clean up naive 'keep' translationg - Fs = fix_final_fitting(Fittings0), - case lists:last(Query) of - {_, _, _, false} -> - %% The default action is to send results down to the next - %% fitting in the pipe. However, the last MapReduce query - %% doesn't want those results. So, add a "black hole" - %% fitting that will stop all work items from getting to - %% the sink and thus polluting our expected results. - Fs ++ [#fitting_spec{name=black_hole, - module=riak_pipe_w_pass, - arg=black_hole, - chashfun=follow}]; - _ -> - Fs - end. - --spec mr2pipe_phase(query_part(), - Index :: integer(), - ConstantHashSeed :: term(), - Query :: tuple()) -> - [ riak_pipe:fitting_spec() ]. -mr2pipe_phase({map,FunSpec,Arg,Keep}, I, _ConstHashCookie, QueryT) -> - map2pipe(FunSpec, Arg, Keep, I, QueryT); -mr2pipe_phase({reduce,FunSpec,Arg,Keep}, I, ConstHashCookie, _QueryT) -> - reduce2pipe(FunSpec, Arg, Keep, I, ConstHashCookie); -mr2pipe_phase({link,Bucket,Tag,Keep}, I, _ConstHashCookie, QueryT)-> - link2pipe(Bucket, Tag, Keep, I, QueryT). - -%% @doc Covert a map phase to its pipe fitting specs. -%% -%% Map converts to: -%%
-%% <ol>
-%% <li> A required {@link riak_kv_pipe_get} to fetch the data for
-%%      the input key. </li>
-%% <li> A required {@link riak_kv_mrc_map} to run the given query
-%%      function on that data. </li>
-%% <li> An optional {@link riak_pipe_w_tee} if `keep=true'. </li>
-%% <li> An optional {@link riak_kv_w_reduce} if it is determined
-%%      that results should be prereduced before being sent on. </li>
-%% </ol>
-%% -%% Prereduce logic: add pre_reduce fittings to the pipe line if the -%% current item is a map (if you're calling this func, yes it is) and -%% if the next item in the query is a reduce and if the map's arg or -%% system config wants us to use prereduce. Remember: `I' starts -%% counting at 0, but the element BIF starts at 1, so the element of -%% the next item is I+2. --spec map2pipe(map_query_fun(), term(), boolean(), - Index :: integer(), Query :: tuple()) -> - [ riak_pipe:fitting_spec() ]. -map2pipe(FunSpec, Arg, Keep, I, QueryT) -> - PrereduceP = I+2 =< size(QueryT) andalso - query_type(I+2, QueryT) == reduce andalso - want_prereduce_p(I+1, QueryT), - SafeArg = case FunSpec of - {JS, _} when (JS == jsfun orelse JS == jsanon), - is_list(Arg) -> - %% mochijson cannot encode these properties, - %% so remove them from the argument list - lists:filter( - fun(do_prereduce) -> false; - ({do_prereduce,_}) -> false; - (_) -> true - end, - Arg); - _ -> - Arg - end, - [#fitting_spec{name={kvget_map,I}, - module=riak_kv_pipe_get, - chashfun={riak_kv_pipe_get, bkey_chash}, - nval={riak_kv_pipe_get, bkey_nval}}, - #fitting_spec{name={xform_map,I}, - module=riak_kv_mrc_map, - arg={FunSpec, SafeArg}, - chashfun=follow}] - ++ - [#fitting_spec{name=I, - module=riak_pipe_w_tee, - arg=sink, - chashfun=follow} || Keep] - ++ - if PrereduceP -> - {reduce, R_FunSpec, R_Arg, _Keep} = element(I+2, QueryT), - [#fitting_spec{name={prereduce,I}, - module=riak_kv_w_reduce, - arg={rct, - riak_kv_w_reduce:reduce_compat(R_FunSpec), - R_Arg}, - chashfun=follow}]; - true -> - [] - end. - -%% @doc Examine query and application options to determine if -%% prereduce is appropriate. --spec want_prereduce_p(Index :: integer(), Query :: tuple()) -> - boolean(). -want_prereduce_p(Idx, QueryT) -> - {map, _FuncSpec, Arg, _Keep} = element(Idx, QueryT), - Props = case Arg of - L when is_list(L) -> L; % May or may not be a proplist - {struct, L} -> L; % mochijson form - _ -> [] - end, - AppDefault = app_helper:get_env(riak_kv, mapred_always_prereduce, false), - true =:= proplists:get_value( - <<"do_prereduce">>, Props, % mochijson form - proplists:get_value(do_prereduce, Props, AppDefault)). - --spec query_type(integer(), tuple()) -> map | reduce | link. -query_type(Idx, QueryT) -> - element(1, element(Idx, QueryT)). - -%% @doc Convert a reduce phase to its equivalent pipe fittings. -%% -%% Reduce converts to: -%%
-%% <ol>
-%% <li> A required {@link riak_kv_w_reduce} to run the given query
-%%      function on the input data. </li>
-%% <li> An optional {@link riak_pipe_w_tee} if `keep=true'. </li>
-%% </ol>
-%%
-%% A constant hash is used to get all of the inputs for the reduce to
-%% the same vnode, without caring about which specific vnode that is.
--spec reduce2pipe(reduce_query_fun(), term(), boolean(),
-                  Index :: integer(), ConstantHashSeed :: term()) ->
-         [ riak_pipe:fitting_spec() ].
-reduce2pipe(FunSpec, Arg, Keep, I, ConstHashCookie) ->
-    Hash = chash:key_of(ConstHashCookie),
-    [#fitting_spec{name={reduce,I},
-                   module=riak_kv_w_reduce,
-                   arg={rct,
-                        riak_kv_w_reduce:reduce_compat(FunSpec),
-                        Arg},
-                   chashfun=Hash}
-     |[#fitting_spec{name=I,
-                     module=riak_pipe_w_tee,
-                     arg=sink,
-                     chashfun=follow}
-       ||Keep]].
-
-%% @doc Convert a link phase to its equivalent pipe fittings.
-%%
-%% Link converts to:
-%% <ol>
-%% <li> A required {@link riak_kv_pipe_get} to fetch the data for
-%%      the input key. </li>
-%% <li> A required {@link riak_pipe_w_xform} to perform the link
-%%      extraction </li>
-%% <li> An optional {@link riak_pipe_w_tee} if `keep=true'. </li>
-%% </ol>
--spec link2pipe(link_match(), link_match(), boolean(), - Index :: integer(), Query :: tuple()) -> - [ riak_pipe:fitting_spec() ]. -link2pipe(Bucket, Tag, Keep, I, _QueryT) -> - [#fitting_spec{name={kvget_map,I}, - module=riak_kv_pipe_get, - chashfun={riak_kv_pipe_get, bkey_chash}, - nval={riak_kv_pipe_get, bkey_nval}}, - #fitting_spec{name={xform_map,I}, - module=riak_kv_mrc_map, - arg={{modfun, riak_kv_mrc_map, link_phase}, - {Bucket, Tag}}, - chashfun=follow}| - [#fitting_spec{name=I, - module=riak_pipe_w_tee, - arg=sink, - chashfun=follow} || Keep]]. - -%% @doc Strip extra 'tee' fittings, and correct fitting names used by -%% the naive converters. --spec fix_final_fitting([ riak_pipe:fitting_spec() ]) -> - [ riak_pipe:fitting_spec() ]. -fix_final_fitting(Fittings) -> - case lists:reverse(Fittings) of - [#fitting_spec{module=riak_pipe_w_tee, - name=Int}, - #fitting_spec{}=RealFinal|Rest] - when is_integer(Int) -> - %% chop off tee so we don't get double answers - lists:reverse([RealFinal#fitting_spec{name=Int}|Rest]); - [#fitting_spec{name={_Type,Int}}=Final|Rest] - when is_integer(Int) -> - %% fix final name so outputs look like old API - lists:reverse([Final#fitting_spec{name=Int}|Rest]) - end. - -%% @doc How many phases have `keep=true'? --spec count_keeps_in_query([query_part()]) -> non_neg_integer(). -count_keeps_in_query(Query) -> - lists:foldl(fun({_, _, _, true}, Acc) -> Acc + 1; - (_, Acc) -> Acc - end, 0, Query). - -%% @equiv send_inputs_async(Pipe, Inputs, 60000) -send_inputs_async(Pipe, Inputs) -> - send_inputs_async(Pipe, Inputs, ?DEFAULT_TIMEOUT). - -%% @doc Spawn a process to send inputs to the MapReduce pipe. If -%% sending completes without error, the process will exit normally. -%% If errors occur, the process exits with the error as its reason. -%% -%% The process links itself to the pipeline (via the builder), so if -%% the pipeline shutsdown before sending inputs finishes, the process -%% will be torn down automatically. This also means that an error -%% sending inputs will automatically tear down the pipe (because the -%% process will exit abnormally). -%% -%% It's a good idea to prefer sending inputs and receiving outputs in -%% different processes, especially if you're both sending a large -%% number of inputs (a large bucket list, for instance) and expecting -%% to receive a large number of outputs. The mailbox for a process -%% doing both is likely to be a point of contention, otherwise. --spec send_inputs_async(riak_pipe:pipe(), input(), timeout()) -> - {Sender::pid(), MonitorRef::reference()}. -send_inputs_async(Pipe, Inputs, Timeout) -> - spawn_monitor( - fun() -> - %% tear this process down if the pipeline goes away; - %% also automatically tears down the pipeline if feeding - %% it inputs fails (which is what the users of this - %% function, riak_kv_pb_socket and riak_kv_wm_mapred, want) - erlang:link(Pipe#pipe.builder), - case send_inputs(Pipe, Inputs, Timeout) of - ok -> - %% monitoring process sees a 'normal' exit - %% (and linked builder is left alone) - ok; - Error -> - %% monitoring process sees an 'error' exit - %% (and linked builder dies) - exit(Error) - end - end). - -%% @equiv send_inputs(Pipe, Inputs, 60000) -send_inputs(Pipe, Inputs) -> - send_inputs(Pipe, Inputs, ?DEFAULT_TIMEOUT). - -%% @doc Send inputs into the MapReduce pipe. This function handles -%% setting up the bucket-listing, index-querying, searching, or -%% modfun-evaluating needed to produce keys, if the input is not just -%% a list of keys. 
--spec send_inputs(riak_pipe:pipe(), input(), timeout()) -> - ok | term(). -send_inputs(Pipe, BucketKeyList, _Timeout) when is_list(BucketKeyList) -> - try [ok = riak_pipe:queue_work(Pipe, BKey) - || BKey <- BucketKeyList] of - _ -> - riak_pipe:eoi(Pipe), - ok - catch error:{badmatch,{error,_}=Error} -> - Error - end; -send_inputs(Pipe, Bucket, Timeout) when is_binary(Bucket) -> - riak_kv_pipe_listkeys:queue_existing_pipe(Pipe, Bucket, Timeout); -send_inputs(Pipe, {Bucket, FilterExprs}, Timeout) -> - case riak_kv_mapred_filters:build_filter(FilterExprs) of - {ok, Filters} -> - riak_kv_pipe_listkeys:queue_existing_pipe( - Pipe, {Bucket, Filters}, Timeout); - Error -> - Error - end; -send_inputs(Pipe, {index, Bucket, Index, Key}, Timeout) -> - Query = {eq, Index, Key}, - case riak_core_capability:get({riak_kv, mapred_2i_pipe}, false) of - true -> - riak_kv_pipe_index:queue_existing_pipe( - Pipe, Bucket, Query, Timeout); - _ -> - %% must use modfun form if there are 1.0 nodes in the cluster, - %% because they do not have the riak_kv_pipe_index module - NewInput = {modfun, riak_index, mapred_index, [Bucket, Query]}, - send_inputs(Pipe, NewInput, Timeout) - end; -send_inputs(Pipe, {index, Bucket, Index, StartKey, EndKey}, Timeout) -> - Query = {range, Index, StartKey, EndKey}, - case riak_core_capability:get({riak_kv, mapred_2i_pipe}, false) of - true -> - riak_kv_pipe_index:queue_existing_pipe( - Pipe, Bucket, Query, Timeout); - _ -> - NewInput = {modfun, riak_index, mapred_index, [Bucket, Query]}, - send_inputs(Pipe, NewInput, Timeout) - end; -send_inputs(Pipe, {search, Bucket, Query}, Timeout) -> - NewInput = {modfun, riak_search, mapred_search, [Bucket, Query, []]}, - send_inputs(Pipe, NewInput, Timeout); -send_inputs(Pipe, {search, Bucket, Query, Filter}, Timeout) -> - NewInput = {modfun, riak_search, mapred_search, [Bucket, Query, Filter]}, - send_inputs(Pipe, NewInput, Timeout); -send_inputs(Pipe, {modfun, Mod, Fun, Arg} = Modfun, Timeout) -> - try Mod:Fun(Pipe, Arg, Timeout) of - {ok, Bucket, ReqId} -> - send_key_list(Pipe, Bucket, ReqId); - Other -> - Other - catch - X:Y -> - {Modfun, X, Y, erlang:get_stacktrace()} - end. - -%% @doc Helper function used to redirect the results of -%% index/search/etc. queries into the MapReduce pipe. The function -%% expects to receive zero or more messages of the form `{ReqId, -%% {keys, Keys}}' or `{ReqId, {results, Results}}', followed by one -%% message of the form `{ReqId, done}'. --spec send_key_list(riak_pipe:pipe(), binary(), term()) -> - ok | term(). -send_key_list(Pipe, Bucket, ReqId) -> - receive - {ReqId, {keys, Keys}} -> - %% Get results from list keys operation. - try [ok = riak_pipe:queue_work(Pipe, {Bucket, Key}) - || Key <- Keys] of - _ -> - send_key_list(Pipe, Bucket, ReqId) - catch error:{badmatch,{error,_}=Error} -> - Error - end; - - {ReqId, {results, Results}} -> - %% Get results from 2i operation. Handle both [Keys] and [{Key, - %% Props}] formats. If props exists, use it as keydata. - F = fun - ({Key, Props}) -> - riak_pipe:queue_work(Pipe, {{Bucket, Key}, Props}); - (Key) -> - riak_pipe:queue_work(Pipe, {Bucket, Key}) - end, - try [ok = F(X) || X <- Results] of - _ -> - send_key_list(Pipe, Bucket, ReqId) - catch error:{badmatch,{error,_}=Error} -> - Error - end; - - {ReqId, {error, Reason}} -> - {error, Reason}; - - {ReqId, done} -> - %% Operation has finished. - riak_pipe:eoi(Pipe), - ok - end. 
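To summarize the clauses above, each one accepts a different input
shape; all of the following are valid `Inputs' for mapred/2,3
(buckets, index names, and queries are invented; my_mod:my_generator
is a hypothetical generator module):

    Inputs1 = [{<<"b">>, <<"k1">>}, {<<"b">>, <<"k2">>}],  %% bucket/key list
    Inputs2 = <<"b">>,                                     %% whole bucket
    Inputs3 = {<<"b">>, [[<<"ends_with">>, <<"1">>]]},     %% bucket + keyfilter
    Inputs4 = {index, <<"b">>, <<"field1_bin">>, <<"a">>, <<"m">>},  %% 2i range
    Inputs5 = {search, <<"b">>, <<"field:value">>},        %% Riak Search query
    Inputs6 = {modfun, my_mod, my_generator, my_arg}.      %% hypothetical generator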
- -%% @equiv collect_outputs(Pipe, NumKeeps, 60000) -collect_outputs(Pipe, NumKeeps) -> - collect_outputs(Pipe, NumKeeps, ?DEFAULT_TIMEOUT). - -%% @doc Receive the results produced by the MapReduce pipe (directly, -%% with no sink process between here and there), grouped by the phase -%% they came from. See {@link group_outputs/2} for details on that -%% grouping. --spec collect_outputs(riak_pipe:pipe(), non_neg_integer(), timeout()) -> - {ok, grouped_results()} - | {error, {Reason :: term(), Outputs :: ungrouped_results()}}. -collect_outputs(Pipe, NumKeeps, Timeout) -> - {Result, Outputs, []} = riak_pipe:collect_results(Pipe, Timeout), - case Result of - eoi -> - %% normal result - {ok, group_outputs(Outputs, NumKeeps)}; - Other -> - {error, {Other, Outputs}} - end. - -%% @doc Group the outputs of the MapReduce pipe by the phase that -%% produced them. To be used with {@link collect_outputs/3}. If -%% `NumKeeps' is 2 or more, the return value is a list of result -%% lists, `[Results :: list()]', in the same order as the phases that -%% produced them. If `NumKeeps' is less than 2, the return value is -%% just a list (possibly empty) of results, `Results :: list()'. --spec group_outputs(ungrouped_results(), non_neg_integer()) -> - grouped_results(). -group_outputs(Outputs, NumKeeps) when NumKeeps < 2 -> % 0 or 1 - %% this path trusts that outputs are from only one phase; - %% if NumKeeps lies, all phases will be grouped together; - %% this is much faster than using dict:append/3 for a single key - %% when length(Outputs) is large - [ O || {_, O} <- Outputs ]; -group_outputs(Outputs, _NumKeeps) -> - Group = fun({I,O}, Acc) -> - %% it is assumed that the number of phases - %% producing outputs is small, so a linear search - %% through phases we've seen is not too taxing - case lists:keytake(I, 1, Acc) of - {value, {I, IAcc}, RAcc} -> - [{I,[O|IAcc]}|RAcc]; - false -> - [{I,[O]}|Acc] - end - end, - Merged = lists:foldl(Group, [], Outputs), - [ lists:reverse(O) || {_, O} <- lists:keysort(1, Merged) ]. - -%% @doc Receive the results produced by the MapReduce pipe, via the -%% sink started in {@link mapred_stream_sink/3}, grouped by the phase -%% they came from. If `NumKeeps' is 2 or more, the return value is a -%% list of result lists, `[Results :: list()]', in the same order as -%% the phases that produced them. If `NumKeeps' is less than 2, the -%% return value is just a list (possibly empty) of results, `Results -%% :: list()'. --spec collect_sink(#mrc_ctx{}) -> - {ok, grouped_results()} -<<<<<<< HEAD - | {error, {Reason :: term(), Outputs :: ungrouped_results()}}. -======= - | {error, receive_sink_error()}. ->>>>>>> master -collect_sink(#mrc_ctx{keeps=NumKeeps}=Ctx) -> - case collect_sink_loop(Ctx, []) of - {ok, Outputs} -> - {ok, remove_fitting_names(Outputs, NumKeeps)}; - {error, Reason, _}-> - {error, Reason} - end. - -%% collect everything the pipe has to offer -collect_sink_loop(Ctx, Acc) -> - case receive_sink(Ctx) of - {ok, false, Output} -> - collect_sink_loop(Ctx, [Output|Acc]); - {ok, true, Output} -> - {ok, riak_kv_mrc_sink:merge_outputs([Output|Acc])}; - {error, Reason, Outputs} -> - {error, Reason, Outputs} - end. - -%% @doc Receive any output generated by the system set up in {@link -%% mapred_stream_sink/3}. This will include any of the following: -%% -%%
-%% <ul>
-%%   <li>`#kv_mrc_sink{}'</li>
-%%   <li>`DOWN' for `#mrc_ctx.sender' (the async sender)</li>
-%%   <li>`DOWN' for `#mrc_ctx.sink'</li>
-%%   <li>`{pipe_timeout, #mrc_ctx.ref}'</li>
-%% </ul>
-%% -%% An `{ok, Done::boolean(), Results::orddict()}' tuple is returned if -%% a `#kv_mrc_sink{}' message is recieved with no error logs. An -%% `{error, Reason::term(), PartialResults::orddict()}' tuple is -%% returned if any of the following are received: `#kv_mrc_sink{}' -%% message with an error log, a `DOWN' for the async sender with -%% non-`normal' reason, a `DOWN' for the sink, or the `pipe_timeout'. -%% -%% Note that this function calls {@link riak_kv_mrc_sink:next/1}, so -%% your code should not also call it. --spec receive_sink(#mrc_ctx{}) -> - {ok, Done::boolean(), Results::grouped_results()} -<<<<<<< HEAD - | {error, Reason::term(), PartialResults::grouped_results()}. -======= - | {error, receive_sink_error(), PartialResults::grouped_results()}. ->>>>>>> master -receive_sink(#mrc_ctx{sink={Sink,_}}=Ctx) -> - %% the sender-DOWN-normal case loops to ignore that message, but - %% we only want to send our next-request once - riak_kv_mrc_sink:next(Sink), - receive_sink_helper(Ctx). - -receive_sink_helper(#mrc_ctx{ref=PipeRef, - sink={Sink, SinkMon}, - sender={Sender, SenderMon}}=Ctx) -> - receive - #kv_mrc_sink{ref=PipeRef, results=Results, logs=Logs, done=Done} -> - case error_exists(Logs) of - {true, From, Info} -> - {error, {From, Info}, Results}; - false -> - {ok, Done, Results} - end; - {'DOWN', SenderMon, process, Sender, normal} -> - %% sender dying normal just means it finished - receive_sink_helper(Ctx); - {'DOWN', SenderMon, process, Sender, Reason} -> - {error, {sender_died, Reason}, []}; - {'DOWN', SinkMon, process, Sink, Reason} -> - {error, {sink_died, Reason}, []}; - {pipe_timeout, PipeRef} -> - {error, timeout, []} - end. - -%% MR is supposed to return just a list of results if there was only -%% one phase being "kept", but a list of result lists (one per phase) -%% if multiple phases were kept. -remove_fitting_names([{_,Outputs}], NumKeeps) when NumKeeps < 2 -> - Outputs; -remove_fitting_names(Outputs, _NumKeeps) -> - [O || {_, O} <- Outputs]. - -%% @doc Destroy the pipe, and call {@link cleanup_sink/1}. --spec destroy_sink(#mrc_ctx{}) -> ok. -destroy_sink(#mrc_ctx{pipe=Pipe}=Ctx) -> - riak_pipe:destroy(Pipe), - cleanup_sink(Ctx). - -%% @doc Tear down the async sender, sink, and timer pieces setup by -%% {@link mapred_stream_sink/3}, and collect any messages they might -%% have been delivering. -<<<<<<< HEAD --spec cleanup_sink(#mrc_ctx{}) -> ok. -======= --spec cleanup_sink(#mrc_ctx{}|{pid(),reference()}|undefined) -> ok. ->>>>>>> master -cleanup_sink(#mrc_ctx{sender=Sender, sink=Sink, timer=Timer}) -> - cleanup_sender(Sender), - cleanup_sink(Sink), - cleanup_timer(Timer); -cleanup_sink({SinkPid, SinkMon}) when is_pid(SinkPid), - is_reference(SinkMon) -> - erlang:demonitor(SinkMon, [flush]), - %% killing the sink should tear down the pipe - riak_kv_mrc_sink:stop(SinkPid), - %% receive just in case the sink had sent us one last response - receive #kv_mrc_sink{} -> ok after 0 -> ok end; -cleanup_sink(undefined) -> - ok. - -%% Destroying the pipe via riak_pipe_builder:destroy/1 does not kill -%% the sender immediately, because it causes the builder to exit with -%% reason `normal', so no exit signal is sent. The sender will -%% eventually receive `worker_startup_error's from vnodes that can no -%% longer find the fittings, but to help the process along, we kill -%% them immediately here. 
-<<<<<<< HEAD -cleanup_sender(#mrc_ctx{sender=Sender}) -> - cleanup_sender(Sender); -======= ->>>>>>> master -cleanup_sender({SenderPid, SenderMon}) when is_pid(SenderPid), - is_reference(SenderMon) -> - erlang:demonitor(SenderMon, [flush]), - exit(SenderPid, kill), - ok; -cleanup_sender(undefined) -> - ok. - -%% don't let timer messages leak -<<<<<<< HEAD -cleanup_timer(#mrc_ctx{timer=Timer}) -> - cleanup_timer(Timer); -======= ->>>>>>> master -cleanup_timer({Tref, PipeRef}) when is_reference(Tref), - is_reference(PipeRef) -> - case erlang:cancel_timer(Tref) of - false -> - receive - {pipe_timeout, PipeRef} -> - ok - after 0 -> - ok - end; - _ -> - ok - end; -cleanup_timer(undefined) -> - ok. - -%% @doc Look through the logs the pipe produced, and determine if any -%% of them signal an error. Return the details about the first error -%% found. -%% -%% Each log should be of the form: `{#pipe_log.from, #pipe_log.msg}' --spec error_exists(list()) -> {true, term(), term()} | false. -error_exists(Logs) -> - case [ {F, I} || {F, {trace, [error], {error, I}}} <- Logs ] of - [{From, Info}|_] -> - {true, From, Info}; - [] -> - false - end. - -%% @doc Produce an Erlang term from a string containing Erlang code. -%% This is used by {@link riak_kv_mrc_map} and {@link -%% riak_kv_w_reduce} to compile functions specified as `{strfun, -%% Source}'. --spec compile_string(string()|binary()) -> {ok, term()} - | {ErrorType :: term, Reason :: term}. -compile_string(Binary) when is_binary(Binary) -> - compile_string(binary_to_list(Binary)); -compile_string(String) when is_list(String) -> - try - {ok, Tokens, _} = erl_scan:string(String), - {ok, [Form]} = erl_parse:parse_exprs(Tokens), - {value, Value, _} = erl_eval:expr(Form, erl_eval:new_bindings()), - {ok, Value} - catch Type:Error -> - {Type, Error} - end. - -%% choose sink sync period, given Options, app env, default --spec sink_sync_period() -> integer() | infinity. -sink_sync_period() -> - case application:get_env(riak_kv, mrc_sink_sync_period) of - {ok, Size} when is_integer(Size); Size == infinity -> - Size; - _ -> - ?SINK_SYNC_PERIOD_DEFAULT - end. - -%%% - -%% @doc Use a MapReduce query to get the value of the `foo/bar' -%% object. See {@link example_setup/1} for details of what should be -%% in `foo/bar'. --spec example() -> {ok, [binary()]} - | {error, term()} | {error, term(), term()}. -example() -> - mapred([{<<"foo">>, <<"bar">>}], - [{map, {modfun, riak_kv_mapreduce, map_object_value}, - none, true}]). - -%% @doc Use a MapReduce query to get the values of the objects in the -%% `foo' bucket. See {@link example_setup/1} for details of what -%% should be in `foo/*'. --spec example_bucket() -> {ok, [binary()]} - | {error, term()} | {error, term(), term()}. -example_bucket() -> - mapred(<<"foo">>, - [{map, {modfun, riak_kv_mapreduce, map_object_value}, - none, true}]). - -%% @doc Use a MapReduce query to sum the values of the objects in the -%% `foonum' bucket. See {@link example_setup/1} for details of what -%% should be in `foonum/*'. -%% -%% This function asks to keep the results of both the map phase and -%% the reduce phase, so the output should be a list containing two -%% lists. The first sublist should contain all of the values of the -%% objects in the bucket. The second sublist should contain only one -%% element, equal to the sum of the elements in the first sublist. -%% For example, `[[1,2,3,4,5],[15]]'. --spec example_reduce() -> {ok, [[integer()]]} - | {error, term()} | {error, term(), term()}. 
-example_reduce() -> - mapred(<<"foonum">>, - [{map, {modfun, riak_kv_mapreduce, map_object_value}, - none, true}, - {reduce, {qfun, fun(Inputs, _) -> [lists:sum(Inputs)] end}, - none, true}]). - -%% @equiv example_setup(5) -example_setup() -> - example_setup(5). - -%% @doc Store some example data for the other example functions. -%% -%% Objects stored: -%%
-%% <dl>
-%%   <dt>`foo/bar'</dt>
-%%   <dd>Stores the string "what did you expect?"</dd>
-%%
-%%   <dt>`foo/bar1' .. `foo/barNum'</dt>
-%%   <dd>Each stores the string "bar val INDEX"</dd>
-%%
-%%   <dt>`foonum/bar1' .. `foonum/barNum'</dt>
-%%   <dd>Each stores its index as an integer</dd>
-%% </dl>
--spec example_setup(pos_integer()) -> ok. -example_setup(Num) when Num > 0 -> - {ok, C} = riak:local_client(), - C:put(riak_object:new(<<"foo">>, <<"bar">>, <<"what did you expect?">>)), - [C:put(riak_object:new(<<"foo">>, - list_to_binary("bar"++integer_to_list(X)), - list_to_binary("bar val "++integer_to_list(X)))) - || X <- lists:seq(1, Num)], - [C:put(riak_object:new(<<"foonum">>, - list_to_binary("bar"++integer_to_list(X)), - X)) || - X <- lists:seq(1, Num)], - ok. - -%% @doc For Riak 1.0 compatibility, provide a translation from old -%% anonymous functions to new ones. This function should have a -%% limited-use lifetime: it will only be evaluated while a cluster is -%% in the middle of a rolling-upgrade from 1.0.x to 1.1. -%% -%% Yes, the return value is a new anonymous function. This shouldn't -%% be a problem with a future upgrade, though, as no one should be -%% running a cluster that includes three Riak versions. Therefore, the -%% node that spread this old Fun around the cluster should have been -%% stopped, along with the pipe defined by the old fun before this new -%% fun would itself be considered old. -compat_fun(Fun) -> - {uniq, Uniq} = erlang:fun_info(Fun, uniq), - {index, I} = erlang:fun_info(Fun, index), - compat_fun(Uniq, I, Fun). - -%% Riak 1.0.1 and 1.0.2 funs -compat_fun(120571329, 1, _Fun) -> - %% chash used for kv_get in map - {ok, fun riak_kv_pipe_get:bkey_chash/1}; -compat_fun(112900629, 2, _Fun) -> - %% nval used for kv_get in map - {ok, fun riak_kv_pipe_get:bkey_nval/1}; -compat_fun(19126064, 3, Fun) -> - %% constant chash used for reduce - {env, [Hash]} = erlang:fun_info(Fun, env), - {ok, fun(_) -> Hash end}; -compat_fun(29992360, 4, _Fun) -> - %% chash used for kv_get in link - {ok, fun riak_kv_pipe_get:bkey_chash/1}; -compat_fun(22321692, 5, _Fun) -> - %% nval used for kv_get in link - {ok, fun riak_kv_pipe_get:bkey_nval/1}; -compat_fun(66856669, 6, Fun) -> - %% link extraction function - %% Yes, the env really does have bucket and tag the reverse of the spec - {env, [Tag, Bucket]} = erlang:fun_info(Fun, env), - {ok, fun({ok, Input, _Keydata}, Partition, FittingDetails) -> - Results = riak_kv_mrc_map:link_phase( - Input, undefined, {Bucket, Tag}), - [ riak_pipe_vnode_worker:send_output( - R, Partition, FittingDetails) - || R <- Results ], - ok; - ({{error, _},_,_}, _, _) -> - ok - end}; - -%% dunno -compat_fun(_, _, _) -> - error. diff --git a/src/riak_kv_mrc_sink.erl.orig b/src/riak_kv_mrc_sink.erl.orig deleted file mode 100644 index f20f29c8f0..0000000000 --- a/src/riak_kv_mrc_sink.erl.orig +++ /dev/null @@ -1,442 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% riak_kv_mrc_sink: A simple process to act as a Pipe sink for -%% MapReduce queries -%% -%% Copyright (c) 2012 Basho Technologies, Inc. All Rights Reserved. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. 
-%%
-%% -------------------------------------------------------------------
-
-%% @doc This FSM acts as a Riak Pipe sink, and dumbly accumulates
-%% messages received from the pipe, until it is asked to send them to
-%% its owner. The owner is whatever process started this FSM.
-
-%% This FSM will speak both `raw' and `fsm' sink types (it
-%% answers appropriately to each, without parameterization).
-
-%% The FSM enforces a soft cap on the number of results and logs
-%% accumulated when receiving `fsm' sink type messages. When the
-%% number of results+logs that have been delivered exceeds the cap
-%% between calls to {@link next/1}, the sink stops delivering result
-%% acks to workers. The value of this cap can be specified by
-%% including a `buffer' property in the `Options' parameter of {@link
-%% start/2}, or by setting the `mrc_sink_buffer' environment variable
-%% in the `riak_kv' application. If neither setting is specified, or
-%% they are not specified as non-negative integers, the default
-%% (currently 1000) is used.
-
-%% Messages are delivered to the owner as an erlang message that is a
-%% `#kv_mrc_sink{}' record. The `logs' field is a list of log messages
-%% received, ordered oldest to youngest, each having the form
-%% `{PhaseId, Message}'. The `results' field is an orddict keyed by
-%% `PhaseId', with each value being a list of results received from
-%% that phase, ordered oldest to youngest. The `ref' field is the
-%% reference from the `#pipe{}' record. The `done' field is `true' if
-%% the `eoi' message has been received, or `false' otherwise.
-
-%% There are three states: `which_pipe', `collect_output', and
-%% `send_output'.
-
-%% The FSM starts in `which_pipe', and waits there until it
-%% is told which pipe to expect output from.
-
-%% From `which_pipe', the FSM moves to `collect_output'. While in
-%% `collect_output', the FSM simply collects `#pipe_log{}',
-%% `#pipe_result{}', and `#pipe_eoi{}' messages.
-
-%% If the FSM has received logs, results, or the eoi before it
-%% receives a `next' event, it sends everything it has accumulated to
-%% the owner, wrapped in a `#kv_mrc_sink{}' record, clears its buffers,
-%% and returns to collecting pipe messages.
-
-%% If the FSM has not received any logs, results, or the eoi before it
-%% receives a `next' event, it enters the `send_output' state. As soon
-%% as the FSM receives any log, result, or eoi message in the
-%% `send_output' state, it sends that message to the owner process,
-%% and then returns to the `collect_output' state.
-
-%% The FSM only exits on its own in three cases. The first is when its
-%% owner exits. The second is when the builder of the pipe for which
-%% it is consuming messages exits abnormally. The third is after it
-%% delivers a `#kv_mrc_sink{}' in which it has marked
-%% `done=true'.
--module(riak_kv_mrc_sink).
-
--export([
-         start/2,
-         start_link/2,
-         use_pipe/2,
-         next/1,
-         stop/1,
-         merge_outputs/1,
-         init/1,
-         which_pipe/2, which_pipe/3,
-         collect_output/2, collect_output/3,
-         send_output/2, send_output/3,
-         handle_event/3,
-         handle_sync_event/4,
-         handle_info/3,
-         terminate/3,
-         code_change/4
-        ]).
-
--behaviour(gen_fsm).
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
--endif.
-
--include_lib("riak_pipe/include/riak_pipe.hrl").
--include("riak_kv_mrc_sink.hrl").
-
--define(BUFFER_SIZE_DEFAULT, 1000).
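[A sketch, not part of the patch, of the owner-side loop implied by the state description above: start the sink, point it at a pipe, then alternate next/1 requests with `#kv_mrc_sink{}' receives until `done' is true. Assumes the caller includes riak_kv_mrc_sink.hrl and holds a #pipe{} whose sink is this FSM; `demo_drain'/`drain' are hypothetical names and error cases are omitted.]

    demo_drain(Pipe, Options) ->
        {ok, Sink} = riak_kv_mrc_sink:start(self(), Options),
        ok = riak_kv_mrc_sink:use_pipe(Sink, Pipe),
        drain(Sink, []).

    drain(Sink, Acc) ->
        riak_kv_mrc_sink:next(Sink),          %% request the next batch
        receive
            #kv_mrc_sink{done=true, results=Results} ->
                riak_kv_mrc_sink:merge_outputs([Results|Acc]);
            #kv_mrc_sink{done=false, results=Results} ->
                drain(Sink, [Results|Acc])
        end.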
- --record(state, { - owner :: pid(), - builder :: pid(), - ref :: reference(), - results=[] :: [{PhaseId::term(), Results::list()}], - delayed_acks=[] :: list(), - logs=[] :: list(), - done=false :: boolean(), - buffer_max :: integer(), - buffer_left :: integer() - }). - -start(OwnerPid, Options) -> - riak_kv_mrc_sink_sup:start_sink(OwnerPid, Options). - -start_link(OwnerPid, Options) -> - gen_fsm:start_link(?MODULE, [OwnerPid, Options], []). - -use_pipe(Sink, Pipe) -> - gen_fsm:sync_send_event(Sink, {use_pipe, Pipe}). - -%% @doc Trigger the send of the next result/log/eoi batch received. -next(Sink) -> - gen_fsm:send_event(Sink, next). - -stop(Sink) -> - riak_kv_mrc_sink_sup:terminate_sink(Sink). - -%% @doc Convenience: If outputs are collected as a list of orddicts, -%% with the first being the most recently received, merge them into -%% one orddict. -%% -%% That is, for one keep, our input should look like: -%% [ [{0, [G,H,I]}], [{0, [D,E,F]}], [{0, [A,B,C]}] ] -%% And we want it to come out as: -%% [{0, [A,B,C,D,E,F,G,H,I]}] --spec merge_outputs([ [{integer(), list()}] ]) -> [{integer(), list()}]. -merge_outputs(Acc) -> - %% each orddict has its outputs in oldest->newest; since we're - %% iterating from newest->oldest overall, we can just tack the - %% next list onto the front of the accumulator - DM = fun(_K, O, A) -> O++A end, - lists:foldl(fun(O, A) -> orddict:merge(DM, O, A) end, [], Acc). - -%% gen_fsm exports - -init([OwnerPid, Options]) -> - erlang:monitor(process, OwnerPid), - Buffer = buffer_size(Options), - {ok, which_pipe, #state{owner=OwnerPid, - buffer_max=Buffer, - buffer_left=Buffer}}. - -%%% which_pipe: waiting to find out what pipe we're listening to - -which_pipe(_, State) -> - {next_state, which_pipe, State}. - -which_pipe({use_pipe, #pipe{builder=Builder, sink=Sink}}, _From, State) -> - erlang:monitor(process, Builder), - {reply, ok, collect_output, - State#state{builder=Builder, ref=Sink#fitting.ref}}; -which_pipe(_, _, State) -> - {next_state, which_pipe, State}. - -%%% collect_output: buffering results and logs until asked for them - -collect_output(next, State) -> - case State#state.done of - true -> - NewState = send_to_owner(State), - {stop, normal, NewState}; - false -> - case has_output(State) of - true -> - NewState = send_to_owner(State), - {next_state, collect_output, NewState}; - false -> - %% nothing to send yet, prepare to send as soon as - %% there is something - {next_state, send_output, State} - end - end; -collect_output(#pipe_result{ref=Ref, from=PhaseId, result=Res}, - #state{ref=Ref, results=Acc}=State) -> - NewAcc = add_result(PhaseId, Res, Acc), - {next_state, collect_output, State#state{results=NewAcc}}; -collect_output(#pipe_log{ref=Ref, from=PhaseId, msg=Msg}, - #state{ref=Ref, logs=Acc}=State) -> - {next_state, collect_output, State#state{logs=[{PhaseId, Msg}|Acc]}}; -collect_output(#pipe_eoi{ref=Ref}, #state{ref=Ref}=State) -> - {next_state, collect_output, State#state{done=true}}; -collect_output(_, State) -> - {next_state, collect_output, State}. 
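[An eunit-style restatement, not part of the patch, of the merge_outputs/1 example given in the comment above; atoms stand in for real results.]

    merge_outputs_example_test() ->
        %% input is newest batch first; each phase's list comes out oldest-first
        Batches = [ [{0, [g, h, i]}], [{0, [d, e, f]}], [{0, [a, b, c]}] ],
        [{0, [a, b, c, d, e, f, g, h, i]}] =
            riak_kv_mrc_sink:merge_outputs(Batches).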
- -collect_output(#pipe_result{ref=Ref, from=PhaseId, result=Res}, - From, - #state{ref=Ref, results=Acc}=State) -> - NewAcc = add_result(PhaseId, Res, Acc), - maybe_ack(From, State#state{results=NewAcc}); -collect_output(#pipe_log{ref=Ref, from=PhaseId, msg=Msg}, - From, - #state{ref=Ref, logs=Acc}=State) -> - maybe_ack(From, State#state{logs=[{PhaseId, Msg}|Acc]}); -collect_output(#pipe_eoi{ref=Ref}, _From, #state{ref=Ref}=State) -> - {reply, ok, collect_output, State#state{done=true}}; -collect_output(_, _, State) -> - {next_state, collect_output, State}. - -maybe_ack(_From, #state{buffer_left=Left}=State) when Left > 0 -> - %% there's room for more, tell the worker it can continue - {reply, ok, collect_output, State#state{buffer_left=Left-1}}; -maybe_ack(From, #state{buffer_left=Left, delayed_acks=Delayed}=State) -> - %% there's no more room, hold up the worker - %% not actually necessary to update buffer_left, but it could make - %% for interesting stats - {next_state, collect_output, - State#state{buffer_left=Left-1, delayed_acks=[From|Delayed]}}. - -%% send_output: waiting for output to send, after having been asked -%% for some while there wasn't any - -send_output(#pipe_result{ref=Ref, from=PhaseId, result=Res}, - #state{ref=Ref, results=Acc}=State) -> - NewAcc = add_result(PhaseId, Res, Acc), - NewState = send_to_owner(State#state{results=NewAcc}), - {next_state, collect_output, NewState}; -send_output(#pipe_log{ref=Ref, from=PhaseId, msg=Msg}, - #state{ref=Ref, logs=Acc}=State) -> - NewState = send_to_owner(State#state{logs=[{PhaseId, Msg}|Acc]}), - {next_state, collect_output, NewState}; -send_output(#pipe_eoi{ref=Ref}, #state{ref=Ref}=State) -> - NewState = send_to_owner(State#state{done=true}), - {stop, normal, NewState}; -send_output(_, State) -> - {next_state, send_output, State}. - -send_output(#pipe_result{ref=Ref, from=PhaseId, result=Res}, - _From, #state{ref=Ref, results=Acc}=State) -> - NewAcc = add_result(PhaseId, Res, Acc), - NewState = send_to_owner(State#state{results=NewAcc}), - {reply, ok, collect_output, NewState}; -send_output(#pipe_log{ref=Ref, from=PhaseId, msg=Msg}, - _From, #state{ref=Ref, logs=Acc}=State) -> - NewState = send_to_owner(State#state{logs=[{PhaseId, Msg}|Acc]}), - {reply, ok, collect_output, NewState}; -send_output(#pipe_eoi{ref=Ref}, _From, #state{ref=Ref}=State) -> - NewState = send_to_owner(State#state{done=true}), - {stop, normal, ok, NewState}; -send_output(_, _, State) -> - {next_state, send_output, State}. - -handle_event(_, StateName, State) -> - {next_state, StateName, State}. - -handle_sync_event(_, _, StateName, State) -> - {next_state, StateName, State}. - -%% Clusters containing nodes running Riak version 1.2 and previous -%% will send raw results, regardless of sink type. We can't block -%% these worker sending raw results, but we can still track these -%% additions, and block other workers because of them. 
-handle_info(#pipe_result{ref=Ref, from=PhaseId, result=Res}, - StateName, - #state{ref=Ref, results=Acc, buffer_left=Left}=State) -> - NewAcc = add_result(PhaseId, Res, Acc), - info_response(StateName, - State#state{results=NewAcc, buffer_left=Left-1}); -handle_info(#pipe_log{ref=Ref, from=PhaseId, msg=Msg}, - StateName, - #state{ref=Ref, logs=Acc, buffer_left=Left}=State) -> - info_response(StateName, - State#state{logs=[{PhaseId, Msg}|Acc], - buffer_left=Left-1}); -handle_info(#pipe_eoi{ref=Ref}, - StateName, #state{ref=Ref}=State) -> - info_response(StateName, State#state{done=true}); -handle_info({'DOWN', _, process, Pid, _Reason}, _, - #state{owner=Pid}=State) -> - %% exit as soon as the owner dies - {stop, normal, State}; -handle_info({'DOWN', _, process, Pid, Reason}, _, - #state{builder=Pid}=State) when Reason /= normal -> - %% don't stop when the builder exits 'normal', because that's - %% probably just the pipe shutting down normally - wait for the - %% owner to ask for the last outputs - {stop, normal, State}; -handle_info(_, StateName, State) -> - {next_state, StateName, State}. - -%% continue buffering, unless we've been waiting to reply; stop if we -%% were waiting to reply and we've received eoi -info_response(collect_output, State) -> - {next_state, collect_output, State}; -info_response(send_output, #state{done=Done}=State) -> - NewState = send_to_owner(State), - if Done -> {stop, normal, NewState}; - true -> {next_state, collect_output, NewState} - end. - -terminate(_, _, _) -> - ok. - -code_change(_, StateName, State, _) -> - {ok, StateName, State}. - -%% internal - -has_output(#state{results=[], logs=[]}) -> - false; -has_output(_) -> - true. - -%% also clears buffers -send_to_owner(#state{owner=Owner, ref=Ref, - results=Results, logs=Logs, done=Done, - buffer_max=Max, delayed_acks=Delayed}=State) -> - Owner ! #kv_mrc_sink{ref=Ref, - results=finish_results(Results), - logs=lists:reverse(Logs), - done=Done}, - [ gen_fsm:reply(From, ok) || From <- Delayed ], - State#state{results=[], logs=[], - buffer_left=Max, delayed_acks=[]}. - -%% results are kept as lists in a proplist -add_result(PhaseId, Result, Acc) -> - case lists:keytake(PhaseId, 1, Acc) of - {value, {PhaseId, IAcc}, RAcc} -> - [{PhaseId,[Result|IAcc]}|RAcc]; - false -> - [{PhaseId,[Result]}|Acc] - end. - -%% transform the proplist buffers into orddicts time-ordered -finish_results(Results) -> - [{I, lists:reverse(R)} || {I, R} <- lists:keysort(1, Results)]. - -%% choose buffer size, given Options, app env, default --spec buffer_size(list()) -> non_neg_integer(). -buffer_size(Options) -> - case buffer_size_options(Options) of - {ok, Size} -> Size; - false -> - case buffer_size_app_env() of - {ok, Size} -> Size; - false -> - ?BUFFER_SIZE_DEFAULT - end - end. - -<<<<<<< HEAD --spec buffer_size_options(list()) -> non_neg_integer(). -======= --spec buffer_size_options(list()) -> {ok, non_neg_integer()} | false. ->>>>>>> master -buffer_size_options(Options) -> - case lists:keyfind(buffer, 1, Options) of - {buffer, Size} when is_integer(Size), Size >= 0 -> - {ok, Size}; - _ -> - false - end. - -<<<<<<< HEAD --spec buffer_size_app_env() -> non_neg_integer(). -======= --spec buffer_size_app_env() -> {ok, non_neg_integer()} | false. ->>>>>>> master -buffer_size_app_env() -> - case application:get_env(riak_kv, mrc_sink_buffer) of - {ok, Size} when is_integer(Size), Size >= 0 -> - {ok, Size}; - _ -> - false - end. - -%% TEST - --ifdef(TEST). 
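[For illustration, not part of the patch: the two knobs buffer_size/1 above consults, highest priority first. The value 500 is arbitrary, the shell pid is invented, and start/2 assumes the sink supervisor is running.]

    1> riak_kv_mrc_sink:start(self(), [{buffer, 500}]).    %% per-sink cap via Options
    {ok,<0.123.0>}
    2> application:set_env(riak_kv, mrc_sink_buffer, 500). %% node-wide default
    ok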
- -buffer_size_test_() -> - Tests = [ {"buffer option", 5, [{buffer, 5}], []}, - {"buffer app env", 5, [], [{mrc_sink_buffer, 5}]}, - {"buffer default", ?BUFFER_SIZE_DEFAULT, [], []} ], - {foreach, - fun() -> application:load(riak_kv) end, - fun(_) -> application:unload(riak_kv) end, - [buffer_size_test_helper(Name, Size, Options, AppEnv) - || {Name, Size, Options, AppEnv} <- Tests]}. - -buffer_size_test_helper(Name, Size, Options, AppEnv) -> - {Name, - fun() -> - application:load(riak_kv), - [ application:set_env(riak_kv, K, V) || {K, V} <- AppEnv ], - - %% start up our sink - {ok, Sink} = ?MODULE:start_link(self(), Options), - Ref = make_ref(), - Pipe = #pipe{builder=self(), - sink=#fitting{pid=Sink, ref=Ref}}, - ?MODULE:use_pipe(Sink, Pipe), - - %% fill its buffer - [ ok = gen_fsm:sync_send_event( - Sink, - #pipe_result{from=tester, ref=Ref, result=I}, - 1000) - || I <- lists:seq(1, Size) ], - - %% ensure extra result will block - {'EXIT',{timeout,{gen_fsm,sync_send_event,_}}} = - (catch gen_fsm:sync_send_event( - Sink, - #pipe_result{from=tester, ref=Ref, result=Size+1}, - 1000)), - - %% now drain what's there - ?MODULE:next(Sink), - - %% make sure that all results were received, including - %% blocked one - receive - #kv_mrc_sink{ref=Ref, results=[{tester,R}]} -> - ?assertEqual(Size+1, length(R)) - end, - %% make sure that the delayed ack was received - receive - {GenFsmRef, ok} when is_reference(GenFsmRef) -> - ok - end - end}. - --endif. diff --git a/src/riak_kv_put_fsm.erl.orig b/src/riak_kv_put_fsm.erl.orig deleted file mode 100644 index a01ee0847c..0000000000 --- a/src/riak_kv_put_fsm.erl.orig +++ /dev/null @@ -1,886 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% riak_put_fsm: coordination of Riak PUT requests -%% -%% Copyright (c) 2007-2010 Basho Technologies, Inc. All Rights Reserved. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- - -%% @doc coordination of Riak PUT requests - --module(riak_kv_put_fsm). -%-ifdef(TEST). --include_lib("eunit/include/eunit.hrl"). -%-endif. --include_lib("riak_kv_vnode.hrl"). --include_lib("riak_kv_js_pools.hrl"). --include("riak_kv_wm_raw.hrl"). - --behaviour(gen_fsm). --define(DEFAULT_OPTS, [{returnbody, false}, {update_last_modified, true}]). --export([start/3, start/6,start/7]). --export([start_link/3,start_link/6,start_link/7]). --ifdef(TEST). --export([test_link/4]). --endif. --export([init/1, handle_event/3, handle_sync_event/4, - handle_info/3, terminate/3, code_change/4]). --export([prepare/2, validate/2, precommit/2, - waiting_local_vnode/2, - waiting_remote_vnode/2, - postcommit/2, finish/2]). - --type detail_info() :: timing. --type detail() :: true | - false | - [detail_info()]. 
- --type option() :: - %% Min number of primary (owner) vnodes participating - {pw, non_neg_integer()} | - %% Minimum number of vnodes receiving write - {w, non_neg_integer()} | - %% Minimum number of vnodes completing write - {dw, non_neg_integer()} | - {timeout, timeout()} | - %% Prevent precommit/postcommit hooks from running - disable_hooks | - %% Request additional details about request added as extra - %% element at the end of result tuple - {details, detail()} | - %% Put the value as-is, do not increment the vclocks - %% to make the value a frontier. - asis. - --type options() :: [option()]. - --export_type([option/0, options/0, detail/0, detail_info/0]). - --record(state, {from :: {raw, integer(), pid()}, - robj :: riak_object:riak_object(), - options=[] :: options(), - n :: pos_integer(), - w :: non_neg_integer(), - dw :: non_neg_integer(), - coord_pl_entry :: {integer(), atom()}, - preflist2 :: riak_core_apl:preflist2(), - bkey :: {riak_object:bucket(), riak_object:key()}, - req_id :: pos_integer(), - starttime :: pos_integer(), % start time to send to vnodes - timeout :: pos_integer()|infinity, - tref :: reference(), - vnode_options=[] :: list(), - returnbody :: boolean(), - allowmult :: boolean(), - precommit=[] :: list(), - postcommit=[] :: list(), - bucket_props:: list(), - putcore :: riak_kv_put_core:putcore(), - put_usecs :: undefined | non_neg_integer(), - timing = [] :: [{atom(), {non_neg_integer(), non_neg_integer(), - non_neg_integer()}}], - reply, % reply sent to client - tracked_bucket=false :: boolean() %% tracke per bucket stats - }). - --include("riak_kv_dtrace.hrl"). - --define(PARSE_INDEX_PRECOMMIT, {struct, [{<<"mod">>, <<"riak_index">>}, {<<"fun">>, <<"parse_object_hook">>}]}). --define(DEFAULT_TIMEOUT, 60000). - -%% =================================================================== -%% Public API -%% =================================================================== - -start(From, Object, PutOptions) -> - gen_fsm:start(?MODULE, [From, Object, PutOptions], []). - -%% In place only for backwards compatibility -start(ReqId,RObj,W,DW,Timeout,ResultPid) -> - start_link(ReqId,RObj,W,DW,Timeout,ResultPid,[]). - -%% In place only for backwards compatibility -start(ReqId,RObj,W,DW,Timeout,ResultPid,Options) -> - start_link(ReqId,RObj,W,DW,Timeout,ResultPid,Options). - -start_link(ReqId,RObj,W,DW,Timeout,ResultPid) -> - start_link(ReqId,RObj,W,DW,Timeout,ResultPid,[]). - -start_link(ReqId,RObj,W,DW,Timeout,ResultPid,Options) -> - start_link({raw, ReqId, ResultPid}, RObj, [{w, W}, {dw, DW}, {timeout, Timeout} | Options]). - -start_link(From, Object, PutOptions) -> - gen_fsm:start_link(?MODULE, [From, Object, PutOptions], []). - -%% =================================================================== -%% Test API -%% =================================================================== - --ifdef(TEST). -%% Create a put FSM for testing. StateProps must include -%% starttime - start time in gregorian seconds -%% n - N-value for request (is grabbed from bucket props in prepare) -%% bkey - Bucket / Key -%% bucket_props - bucket properties -%% preflist2 - [{{Idx,Node},primary|fallback}] preference list -%% -%% As test, but linked to the caller -test_link(From, Object, PutOptions, StateProps) -> - gen_fsm:start_link(?MODULE, {test, [From, Object, PutOptions], StateProps}, []). - --endif. 
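[An example, not part of the patch, of an option() list as a caller of this FSM might build it; `ReqId' and `RObj' are assumed to be a request id and a riak_object already in scope.]

    Options = [{w, 2}, {dw, 1}, {timeout, 5000}, {details, [timing]}],
    {ok, _Pid} = riak_kv_put_fsm:start({raw, ReqId, self()}, RObj, Options).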
- - -%% ==================================================================== -%% gen_fsm callbacks -%% ==================================================================== - -%% @private -init([From, RObj, Options]) -> - BKey = {Bucket, Key} = {riak_object:bucket(RObj), riak_object:key(RObj)}, - StateData = add_timing(prepare, #state{from = From, - robj = RObj, - bkey = BKey, - options = Options}), -<<<<<<< HEAD -======= - riak_kv_get_put_monitor:put_fsm_spawned(self()), ->>>>>>> master - riak_core_dtrace:put_tag(io_lib:format("~p,~p", [Bucket, Key])), - case riak_kv_util:is_x_deleted(RObj) of - true -> - TombNum = 1, - TombStr = <<"tombstone">>; - false -> - TombNum = 0, - TombStr = <<>> - end, - ?DTRACE(?C_PUT_FSM_INIT, [TombNum], ["init", TombStr]), - {ok, prepare, StateData, 0}; -init({test, Args, StateProps}) -> - %% Call normal init - {ok, prepare, StateData, 0} = init(Args), - - %% Then tweak the state record with entries provided by StateProps - Fields = record_info(fields, state), - FieldPos = lists:zip(Fields, lists:seq(2, length(Fields)+1)), - F = fun({Field, Value}, State0) -> - Pos = proplists:get_value(Field, FieldPos), - setelement(Pos, State0, Value) - end, - TestStateData = lists:foldl(F, StateData, StateProps), - - %% Enter into the validate state, skipping any code that relies on the - %% state of the rest of the system - {ok, validate, TestStateData, 0}. - -%% @private -prepare(timeout, StateData0 = #state{from = From, robj = RObj, - bkey = BKey, - options = Options}) -> - {ok,Ring} = riak_core_ring_manager:get_my_ring(), - BucketProps = riak_core_bucket:get_bucket(riak_object:bucket(RObj), Ring), - DocIdx = riak_core_util:chash_key(BKey), - N = proplists:get_value(n_val,BucketProps), - StatTracked = proplists:get_value(stat_tracked, BucketProps, false), - UpNodes = riak_core_node_watcher:nodes(riak_kv), - Preflist2 = riak_core_apl:get_apl_ann(DocIdx, N, Ring, UpNodes), - %% Check if this node is in the preference list so it can coordinate - LocalPL = [IndexNode || {{_Index, Node} = IndexNode, _Type} <- Preflist2, - Node == node()], - Must = (get_option(asis, Options, false) /= true), - case {Preflist2, LocalPL =:= [] andalso Must == true} of - {[], _} -> - %% Empty preflist - ?DTRACE(?C_PUT_FSM_PREPARE, [-1], ["prepare",<<"all nodes down">>]), - process_reply({error, all_nodes_down}, StateData0); - {_, true} -> - %% This node is not in the preference list -<<<<<<< HEAD - %% forward on to the first node - [{{_Idx, CoordNode},_Type}|_] = Preflist2, - Timeout = get_option(timeout, Options, ?DEFAULT_TIMEOUT), - ?DTRACE(?C_PUT_FSM_PREPARE, [1], - ["prepare", atom2list(CoordNode)]), - case rpc:call(CoordNode,riak_kv_put_fsm_sup,start_put_fsm,[CoordNode,[From,RObj,Options]],Timeout) of - {ok, _Pid} -> - ?DTRACE(?C_PUT_FSM_PREPARE, [2], - ["prepare", atom2list(CoordNode)]), - riak_kv_stat:update(coord_redir), - {stop, normal, StateData0}; - {_, Reason} -> % {error,_} or {badrpc,_} -======= - %% forward on to a random node - ListPos = crypto:rand_uniform(1, length(Preflist2)), - {{_Idx, CoordNode},_Type} = lists:nth(ListPos, Preflist2), - _Timeout = get_option(timeout, Options, ?DEFAULT_TIMEOUT), - ?DTRACE(?C_PUT_FSM_PREPARE, [1], - ["prepare", atom2list(CoordNode)]), - try - proc_lib:spawn(CoordNode,riak_kv_put_fsm,start_link,[From,RObj,Options]), - ?DTRACE(?C_PUT_FSM_PREPARE, [2], - ["prepare", atom2list(CoordNode)]), - riak_kv_stat:update(coord_redir), - {stop, normal, StateData0} - catch - _:Reason -> ->>>>>>> master - ?DTRACE(?C_PUT_FSM_PREPARE, [-2], - ["prepare", 
dtrace_errstr(Reason)]), - lager:error("Unable to forward put for ~p to ~p - ~p\n", - [BKey, CoordNode, Reason]), - process_reply({error, {coord_handoff_failed, Reason}}, StateData0) - end; - _ -> - %% Putting asis, no need to handle locally on a node in the - %% preflist, can coordinate from anywhere - CoordPLEntry = case Must of - true -> - hd(LocalPL); - _ -> - undefined - end, - CoordPlNode = case CoordPLEntry of - undefined -> undefined; - {_Idx, Nd} -> atom2list(Nd) - end, - %% This node is in the preference list, continue - StartTime = riak_core_util:moment(), - StateData = StateData0#state{n = N, - bucket_props = BucketProps, - coord_pl_entry = CoordPLEntry, - preflist2 = Preflist2, - starttime = StartTime, - tracked_bucket = StatTracked}, - ?DTRACE(?C_PUT_FSM_PREPARE, [0], ["prepare", CoordPlNode]), - new_state_timeout(validate, StateData) - end. - -%% @private -validate(timeout, StateData0 = #state{from = {raw, ReqId, _Pid}, - options = Options0, - n=N, bucket_props = BucketProps, - preflist2 = Preflist2}) -> - Timeout = get_option(timeout, Options0, ?DEFAULT_TIMEOUT), - PW0 = get_option(pw, Options0, default), - W0 = get_option(w, Options0, default), - DW0 = get_option(dw, Options0, default), - - PW = riak_kv_util:expand_rw_value(pw, PW0, BucketProps, N), - W = riak_kv_util:expand_rw_value(w, W0, BucketProps, N), - - %% Expand the DW value, but also ensure that DW <= W - DW1 = riak_kv_util:expand_rw_value(dw, DW0, BucketProps, N), - %% If no error occurred expanding DW also ensure that DW <= W - case DW1 of - error -> - DW = error; - _ -> - %% DW must always be 1 with node-based vclocks. - %% The coord vnode is responsible for incrementing the vclock - DW = erlang:min(DW1, erlang:max(1, W)) - end, - NumPrimaries = length([x || {_,primary} <- Preflist2]), - NumVnodes = length(Preflist2), - MinVnodes = erlang:max(1, erlang:max(W, DW)), % always need at least one vnode - if - PW =:= error -> - process_reply({error, {pw_val_violation, PW0}}, StateData0); - W =:= error -> - process_reply({error, {w_val_violation, W0}}, StateData0); - DW =:= error -> - process_reply({error, {dw_val_violation, DW0}}, StateData0); - (W > N) or (DW > N) or (PW > N) -> - process_reply({error, {n_val_violation, N}}, StateData0); - PW > NumPrimaries -> - process_reply({error, {pw_val_unsatisfied, PW, NumPrimaries}}, StateData0); - NumVnodes < MinVnodes -> - process_reply({error, {insufficient_vnodes, NumVnodes, - need, MinVnodes}}, StateData0); - true -> - AllowMult = proplists:get_value(allow_mult,BucketProps), - Disable = proplists:get_bool(disable_hooks, Options0), - Precommit = - if Disable -> []; - true -> - L = get_hooks(precommit, BucketProps), - L ++ [?PARSE_INDEX_PRECOMMIT] - end, - Postcommit = - if Disable -> []; - true -> get_hooks(postcommit, BucketProps) - end, - StateData1 = StateData0#state{n=N, w=W, dw=DW, allowmult=AllowMult, - precommit = Precommit, - postcommit = Postcommit, - req_id = ReqId, - timeout = Timeout}, - Options = flatten_options(proplists:unfold(Options0 ++ ?DEFAULT_OPTS), []), - StateData2 = handle_options(Options, StateData1), - StateData3 = apply_updates(StateData2), - StateData = init_putcore(StateData3), - ?DTRACE(?C_PUT_FSM_VALIDATE, [N, W, PW, DW], []), - case Precommit of - [] -> % Nothing to run, spare the timing code - execute(StateData); - _ -> - new_state_timeout(precommit, StateData) - end - end. 
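[A worked instance, not part of the patch, of the quorum arithmetic in validate/2 above, for N = 3 with `w' and `dw' left at `quorum':]

    %% W  = riak_kv_util:expand_rw_value(w,  quorum, BucketProps, 3)      -> 2
    %% DW = min(expand_rw_value(dw, quorum, BucketProps, 3), max(1, W))  -> 2
    %%      (DW is capped at max(1, W))
    %% MinVnodes = max(1, max(W, DW))                                    -> 2
    %% so a preflist with fewer than 2 vnodes is rejected with
    %% {error, {insufficient_vnodes, NumVnodes, need, 2}}.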
- -%% Run the precommit hooks -precommit(timeout, State = #state{precommit = []}) -> - execute(State); -precommit(timeout, State = #state{precommit = [Hook | Rest], robj = RObj}) -> - Result = decode_precommit(invoke_hook(Hook, RObj)), - case Result of - fail -> - ?DTRACE(?C_PUT_FSM_PRECOMMIT, [-1], []), - process_reply({error, precommit_fail}, State); - {fail, Reason} -> - ?DTRACE(?C_PUT_FSM_PRECOMMIT, [-1], [dtrace_errstr(Reason)]), - process_reply({error, {precommit_fail, Reason}}, State); - Result -> - ?DTRACE(?C_PUT_FSM_PRECOMMIT, [0], []), - {next_state, precommit, State#state{robj = riak_object:apply_updates(Result), - precommit = Rest}, 0} - end. - -%% @private -execute(State=#state{coord_pl_entry = CPL}) -> - case CPL of - undefined -> - execute_remote(State); - _ -> - execute_local(State) - end. - -%% @private -%% Send the put coordinating put requests to the local vnode - the returned object -%% will guarantee a frontier object. -%% N.B. Not actually a state - here in the source to make reading the flow easier -execute_local(StateData=#state{robj=RObj, req_id = ReqId, - timeout=Timeout, bkey=BKey, - coord_pl_entry = {_Index, Node} = CoordPLEntry, - vnode_options=VnodeOptions, - starttime = StartTime}) -> - ?DTRACE(?C_PUT_FSM_EXECUTE_LOCAL, [], [atom2list(Node)]), - StateData1 = add_timing(execute_local, StateData), - TRef = schedule_timeout(Timeout), - riak_kv_vnode:coord_put(CoordPLEntry, BKey, RObj, ReqId, StartTime, VnodeOptions), - StateData2 = StateData1#state{robj = RObj, tref = TRef}, - %% Must always wait for local vnode - it contains the object with updated vclock - %% to use for the remotes. (Ignore optimization for N=1 case for now). - new_state(waiting_local_vnode, StateData2). - -%% @private -waiting_local_vnode(request_timeout, StateData) -> - ?DTRACE(?C_PUT_FSM_WAITING_LOCAL_VNODE, [-1], []), - process_reply({error,timeout}, StateData); -waiting_local_vnode(Result, StateData = #state{putcore = PutCore}) -> - UpdPutCore1 = riak_kv_put_core:add_result(Result, PutCore), - case Result of - {fail, Idx, _ReqId} -> - ?DTRACE(?C_PUT_FSM_WAITING_LOCAL_VNODE, [-1], - [integer_to_list(Idx)]), - %% Local vnode failure is enough to sink whole operation - process_reply({error, local_put_failed}, StateData#state{putcore = UpdPutCore1}); - {w, Idx, _ReqId} -> - ?DTRACE(?C_PUT_FSM_WAITING_LOCAL_VNODE, [1], - [integer_to_list(Idx)]), - {next_state, waiting_local_vnode, StateData#state{putcore = UpdPutCore1}}; - {dw, Idx, PutObj, _ReqId} -> - %% Either returnbody is true or coord put merged with the existing - %% object and bumped the vclock. Either way use the returned - %% object for the remote vnode - ?DTRACE(?C_PUT_FSM_WAITING_LOCAL_VNODE, [2], - [integer_to_list(Idx)]), - execute_remote(StateData#state{robj = PutObj, putcore = UpdPutCore1}); - {dw, Idx, _ReqId} -> - %% Write succeeded without changes to vclock required and returnbody false - ?DTRACE(?C_PUT_FSM_WAITING_LOCAL_VNODE, [2], - [integer_to_list(Idx)]), - execute_remote(StateData#state{putcore = UpdPutCore1}) - end. - -%% @private -%% Send the put requests to any remote nodes if necessary and decided if -%% enough responses have been received yet (i.e. if W/DW=1) -%% N.B. 
Not actually a state - here in the source to make reading the flow easier -execute_remote(StateData=#state{robj=RObj, req_id = ReqId, - preflist2 = Preflist2, bkey=BKey, - coord_pl_entry = CoordPLEntry, - vnode_options=VnodeOptions, - putcore=PutCore, - starttime = StartTime}) -> - StateData1 = add_timing(execute_remote, StateData), - Preflist = [IndexNode || {IndexNode, _Type} <- Preflist2, - IndexNode /= CoordPLEntry], - Ps = [[atom2list(Nd), $,, integer_to_list(Idx)] || - {Idx, Nd} <- lists:sublist(Preflist, 4)], - ?DTRACE(?C_PUT_FSM_EXECUTE_REMOTE, [], [Ps]), - riak_kv_vnode:put(Preflist, BKey, RObj, ReqId, StartTime, VnodeOptions), - case riak_kv_put_core:enough(PutCore) of - true -> - {Reply, UpdPutCore} = riak_kv_put_core:response(PutCore), - process_reply(Reply, StateData#state{putcore = UpdPutCore}); - false -> - new_state(waiting_remote_vnode, StateData1) - end. - - -%% @private -waiting_remote_vnode(request_timeout, StateData) -> - ?DTRACE(?C_PUT_FSM_WAITING_REMOTE_VNODE, [-1], []), - process_reply({error,timeout}, StateData); -waiting_remote_vnode(Result, StateData = #state{putcore = PutCore}) -> - ShortCode = riak_kv_put_core:result_shortcode(Result), - IdxStr = integer_to_list(riak_kv_put_core:result_idx(Result)), - ?DTRACE(?C_PUT_FSM_WAITING_REMOTE_VNODE, [ShortCode], [IdxStr]), - UpdPutCore1 = riak_kv_put_core:add_result(Result, PutCore), - case riak_kv_put_core:enough(UpdPutCore1) of - true -> - {Reply, UpdPutCore2} = riak_kv_put_core:response(UpdPutCore1), - process_reply(Reply, StateData#state{putcore = UpdPutCore2}); - false -> - {next_state, waiting_remote_vnode, StateData#state{putcore = UpdPutCore1}} - end. - -%% @private -postcommit(timeout, StateData = #state{postcommit = []}) -> - ?DTRACE(?C_PUT_FSM_POSTCOMMIT, [0], []), - new_state_timeout(finish, StateData); -postcommit(timeout, StateData = #state{postcommit = [Hook | Rest], - putcore = PutCore}) -> - ?DTRACE(?C_PUT_FSM_POSTCOMMIT, [-2], []), - %% Process the next hook - gives sys:get_status messages a chance if hooks - %% take a long time. - {ReplyObj, UpdPutCore} = riak_kv_put_core:final(PutCore), - decode_postcommit(invoke_hook(Hook, ReplyObj)), - {next_state, postcommit, StateData#state{postcommit = Rest, - putcore = UpdPutCore}, 0}; -postcommit(request_timeout, StateData) -> % still process hooks even if request timed out - ?DTRACE(?C_PUT_FSM_POSTCOMMIT, [-3], []), - {next_state, postcommit, StateData, 0}; -postcommit(Reply, StateData = #state{putcore = PutCore}) -> - ShortCode = riak_kv_put_core:result_shortcode(Reply), - IdxStr = integer_to_list(riak_kv_put_core:result_idx(Reply)), - ?DTRACE(?C_PUT_FSM_POSTCOMMIT, [0, ShortCode], [IdxStr]), - %% late responses - add to state. *Does not* recompute finalobj - UpdPutCore = riak_kv_put_core:add_result(Reply, PutCore), - {next_state, postcommit, StateData#state{putcore = UpdPutCore}, 0}. 
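[For reference, not part of the patch: the hook terms the precommit/postcommit states iterate over are stored in bucket properties. This is the erlang-module form decoded by invoke_hook/2 further below; the module and function names are invented.]

    Hook = {struct, [{<<"mod">>, <<"my_hooks">>}, {<<"fun">>, <<"log_put">>}]},
    riak_core_bucket:set_bucket(<<"mybucket">>, [{postcommit, [Hook]}]).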
- -finish(timeout, StateData = #state{timing = Timing, reply = Reply, - bkey = {Bucket, _Key}, - tracked_bucket = StatTracked}) -> - case Reply of - {error, _} -> - ?DTRACE(?C_PUT_FSM_FINISH, [-1], []), - ok; - _Ok -> - %% TODO: Improve reporting of timing - %% For now can add debug tracers to view the return from calc_timing - {Duration, Stages} = riak_kv_fsm_timing:calc_timing(Timing), - riak_kv_stat:update({put_fsm_time, Bucket, Duration, Stages, StatTracked}), - ?DTRACE(?C_PUT_FSM_FINISH, [0, Duration], []) - end, - {stop, normal, StateData}; -finish(Reply, StateData = #state{putcore = PutCore}) -> - ShortCode = riak_kv_put_core:result_shortcode(Reply), - IdxStr = integer_to_list(riak_kv_put_core:result_idx(Reply)), - ?DTRACE(?C_PUT_FSM_FINISH, [1, ShortCode], [IdxStr]), - %% late responses - add to state. *Does not* recompute finalobj - UpdPutCore = riak_kv_put_core:add_result(Reply, PutCore), - {next_state, finish, StateData#state{putcore = UpdPutCore}, 0}. - - -%% @private -handle_event(_Event, _StateName, StateData) -> - {stop,badmsg,StateData}. - -%% @private -handle_sync_event(_Event, _From, _StateName, StateData) -> - {stop,badmsg,StateData}. - -%% @private - -handle_info(request_timeout, StateName, StateData) -> - ?MODULE:StateName(request_timeout, StateData); -handle_info(_Info, _StateName, StateData) -> - {stop,badmsg,StateData}. - -%% @private -terminate(Reason, _StateName, _State) -> - Reason. - -%% @private -code_change(_OldVsn, StateName, State, _Extra) -> {ok, StateName, State}. - -%% ==================================================================== -%% Internal functions -%% ==================================================================== - -%% Move to the new state, marking the time it started -new_state(StateName, StateData) -> - {next_state, StateName, add_timing(StateName, StateData)}. - -%% Move to the new state, marking the time it started and trigger an immediate -%% timeout. -new_state_timeout(StateName, StateData) -> - {next_state, StateName, add_timing(StateName, StateData), 0}. - -%% What to do once enough responses from vnodes have been received to reply -process_reply(Reply, StateData = #state{postcommit = PostCommit, - putcore = PutCore, - robj = RObj, - bkey = {Bucket, Key}}) -> - StateData1 = client_reply(Reply, StateData), - StateData2 = case PostCommit of - [] -> - StateData1; - _ -> - %% If postcommits defined, calculate final object - %% before any replies received after responding to - %% the client for a consistent view. - {_, UpdPutCore} = riak_kv_put_core:final(PutCore), - StateData1#state{putcore = UpdPutCore} - end, - case Reply of - ok -> - ?DTRACE(?C_PUT_FSM_PROCESS_REPLY, [0], []), - new_state_timeout(postcommit, StateData2); - {ok, _} -> - Values = riak_object:get_values(RObj), - %% TODO: more accurate sizing method - ApproxBytes = size(Bucket) + size(Key) + - lists:sum([size(V) || V <- Values]), - NumSibs = length(Values), - ?DTRACE(?C_PUT_FSM_PROCESS_REPLY, [1, ApproxBytes, NumSibs], []), - new_state_timeout(postcommit, StateData2); - _ -> - ?DTRACE(?C_PUT_FSM_PROCESS_REPLY, [-1], []), - new_state_timeout(finish, StateData2) - end. - - -%% -%% Given an expanded proplist of options, take the first entry for any given key -%% and ignore the rest -%% -%% @private -flatten_options([], Opts) -> - Opts; -flatten_options([{Key, Value} | Rest], Opts) -> - case lists:keymember(Key, 1, Opts) of - true -> - flatten_options(Rest, Opts); - false -> - flatten_options(Rest, [{Key, Value} | Opts]) - end. 
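[An eunit-style check, not part of the patch, of the first-entry-wins behaviour of flatten_options/2 above; it must run inside this module, since the function is not exported.]

    flatten_options_first_wins_test() ->
        %% the first {w,_} wins; the accumulator reverses relative order
        [{dw, 1}, {w, 2}] = flatten_options([{w, 2}, {w, 3}, {dw, 1}], []).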
-
-%% @private
-handle_options([], State) ->
-    State;
-handle_options([{update_last_modified, false}|T], State) ->
-    handle_options(T, State);
-handle_options([{update_last_modified, true}|T], State = #state{robj = RObj}) ->
-    handle_options(T, State#state{robj = update_last_modified(RObj)});
-handle_options([{returnbody, true}|T], State) ->
-    VnodeOpts = [{returnbody, true} | State#state.vnode_options],
-    %% Force DW>0 if requesting return body to ensure the dw event
-    %% returned by the vnode includes the object.
-    handle_options(T, State#state{vnode_options=VnodeOpts,
-                                  dw=erlang:max(1,State#state.dw),
-                                  returnbody=true});
-handle_options([{returnbody, false}|T], State = #state{postcommit = Postcommit}) ->
-    case Postcommit of
-        [] ->
-            handle_options(T, State#state{returnbody=false});
-
-        _ ->
-            %% We have post-commit hooks, we'll need to get the body back
-            %% from the vnode, even though we don't plan to return that to the
-            %% original caller.  Force DW>0 to ensure the dw event returned by
-            %% the vnode includes the object.
-            VnodeOpts = [{returnbody, true} | State#state.vnode_options],
-            handle_options(T, State#state{vnode_options=VnodeOpts,
-                                          dw=erlang:max(1,State#state.dw),
-                                          returnbody=false})
-    end;
-handle_options([{_,_}|T], State) -> handle_options(T, State).
-
-init_putcore(State = #state{n = N, w = W, dw = DW, allowmult = AllowMult,
-                            returnbody = ReturnBody}) ->
-    PutCore = riak_kv_put_core:init(N, W, DW,
-                                    N-W+1,   % cannot ever get W replies
-                                    N-DW+1,  % cannot ever get DW replies
-                                    AllowMult,
-                                    ReturnBody),
-    State#state{putcore = PutCore}.
-
-
-%% Apply any pending updates to robj
-apply_updates(State = #state{robj = RObj}) ->
-    State#state{robj = riak_object:apply_updates(RObj)}.
-
-%%
-%% Update X-Riak-VTag and X-Riak-Last-Modified in the object's metadata, if
-%% necessary.
-%%
-%% @private
-update_last_modified(RObj) ->
-    MD0 = case dict:find(clean, riak_object:get_update_metadata(RObj)) of
-              {ok, true} ->
-                  %% There have been no changes to updatemetadata. If we stash the
-                  %% last modified in this dict, it will cause us to lose existing
-                  %% metadata (bz://508). If there is only one instance of metadata,
-                  %% we can safely update that one, but in the case of multiple siblings,
-                  %% it's hard to know which one to use. In that situation, use the update
-                  %% metadata as is.
-                  case riak_object:get_metadatas(RObj) of
-                      [MD] ->
-                          MD;
-                      _ ->
-                          riak_object:get_update_metadata(RObj)
-                  end;
-              _ ->
-                  riak_object:get_update_metadata(RObj)
-          end,
-    %% Post-0.14.2 changed vtags to be generated from node/now rather than the
-    %% vclocks. The vclock has not been updated at this point. Vtags/etags should
-    %% really be an external interface concern and are only used for sibling selection
-    %% and if-modified type tests so they could be generated on retrieval instead.
-    %% This changes from being a hash on the value to a likely-to-be-unique value
-    %% which should serve the same purpose.  It was possible to generate two
-    %% objects with the same vclock on 0.14.2 if the same clientid was used in
-    %% the same second.  It can be revisited post-1.0.0.
-    Now = os:timestamp(),
-    NewMD = dict:store(?MD_VTAG, make_vtag(Now),
-                       dict:store(?MD_LASTMOD, Now, MD0)),
-    riak_object:update_metadata(RObj, NewMD).
-
-make_vtag(Now) ->
-    <<HashAsNum:128/integer>> = crypto:md5(term_to_binary({node(), Now})),
-    riak_core_util:integer_to_list(HashAsNum,62).
- -%% Invokes the hook and returns a tuple of -%% {Lang, Called, Result} -%% Where Called = {Mod, Fun} if Lang = erlang -%% Called = JSName if Lang = javascript -invoke_hook({struct, Hook}=HookDef, RObj) -> - Mod = proplists:get_value(<<"mod">>, Hook), - Fun = proplists:get_value(<<"fun">>, Hook), - JSName = proplists:get_value(<<"name">>, Hook), - if (Mod == undefined orelse Fun == undefined) andalso JSName == undefined -> - {error, {invalid_hook_def, HookDef}}; - true -> invoke_hook(Mod, Fun, JSName, RObj) - end; -invoke_hook(HookDef, _RObj) -> - {error, {invalid_hook_def, HookDef}}. - -invoke_hook(Mod0, Fun0, undefined, RObj) when Mod0 /= undefined, Fun0 /= undefined -> - Mod = binary_to_atom(Mod0, utf8), - Fun = binary_to_atom(Fun0, utf8), - try - {erlang, {Mod, Fun}, Mod:Fun(RObj)} - catch - Class:Exception -> - {erlang, {Mod, Fun}, {'EXIT', Mod, Fun, Class, Exception}} - end; -invoke_hook(undefined, undefined, JSName, RObj) when JSName /= undefined -> - {js, JSName, riak_kv_js_manager:blocking_dispatch(?JSPOOL_HOOK, {{jsfun, JSName}, RObj}, 5)}; -invoke_hook(_, _, _, _) -> - {error, {invalid_hook_def, no_hook}}. - --spec decode_precommit(any()) -> fail | {fail, any()} | riak_object:riak_object(). -decode_precommit({erlang, {Mod, Fun}, Result}) -> - %% TODO: For DTrace things, we will err on the side of taking the - %% time to format the error results into strings to pass to - %% the probes. If this ends up being too slow, then revisit. - case Result of - fail -> - ?DTRACE(?C_PUT_FSM_DECODE_PRECOMMIT, [-1], []), - riak_kv_stat:update(precommit_fail), - lager:debug("Pre-commit hook ~p:~p failed, no reason given", - [Mod, Fun]), - fail; - {fail, Reason} -> - ?DTRACE(?C_PUT_FSM_DECODE_PRECOMMIT, [-2], [dtrace_errstr(Reason)]), - riak_kv_stat:update(precommit_fail), - lager:debug("Pre-commit hook ~p:~p failed with reason ~p", - [Mod, Fun, Reason]), - Result; - {'EXIT', Mod, Fun, Class, Exception} -> - ?DTRACE(?C_PUT_FSM_DECODE_PRECOMMIT, [-3], - [dtrace_errstr({Mod, Fun, Class, Exception})]), - riak_kv_stat:update(precommit_fail), - lager:debug("Problem invoking pre-commit hook ~p:~p -> ~p:~p~n~p", - [Mod,Fun,Class,Exception, erlang:get_stacktrace()]), - {fail, {hook_crashed, {Mod, Fun, Class, Exception}}}; - Obj -> - try - riak_object:ensure_robject(Obj) - catch X:Y -> - ?DTRACE(?C_PUT_FSM_DECODE_PRECOMMIT, [-4], - [dtrace_errstr({Mod, Fun, X, Y})]), - riak_kv_stat:update(precommit_fail), - lager:debug("Problem invoking pre-commit hook ~p:~p," - " invalid return ~p", - [Mod, Fun, Result]), - {fail, {invalid_return, {Mod, Fun, Result}}} - - end - end; -decode_precommit({js, JSName, Result}) -> - case Result of - {ok, <<"fail">>} -> - ?DTRACE(?C_PUT_FSM_DECODE_PRECOMMIT, [-5], []), - riak_kv_stat:update(precommit_fail), - lager:debug("Pre-commit hook ~p failed, no reason given", - [JSName]), - fail; - {ok, [{<<"fail">>, Message}]} -> - ?DTRACE(?C_PUT_FSM_DECODE_PRECOMMIT, [-6],[dtrace_errstr(Message)]), - riak_kv_stat:update(precommit_fail), - lager:debug("Pre-commit hook ~p failed with reason ~p", - [JSName, Message]), - {fail, Message}; - {ok, Json} -> - case catch riak_object:from_json(Json) of - {'EXIT', _} -> - ?DTRACE(?C_PUT_FSM_DECODE_PRECOMMIT, [-7], []), - {fail, {invalid_return, {JSName, Json}}}; - Obj -> - Obj - end; - {error, Error} -> - riak_kv_stat:update(precommit_fail), - ?DTRACE(?C_PUT_FSM_DECODE_PRECOMMIT, [-7], [dtrace_errstr(Error)]), - lager:debug("Problem invoking pre-commit hook: ~p", [Error]), - fail - end; -decode_precommit({error, Reason}) -> - 
?DTRACE(?C_PUT_FSM_DECODE_PRECOMMIT, [-8], [dtrace_errstr(Reason)]), - riak_kv_stat:update(precommit_fail), - lager:debug("Problem invoking pre-commit hook: ~p", [Reason]), - {fail, Reason}. - -decode_postcommit({erlang, {M,F}, Res}) -> - case Res of - fail -> - ?DTRACE(?C_PUT_FSM_DECODE_POSTCOMMIT, [-1], []), - riak_kv_stat:update(postcommit_fail), - lager:debug("Post-commit hook ~p:~p failed, no reason given", - [M, F]); - {fail, Reason} -> - ?DTRACE(?C_PUT_FSM_DECODE_POSTCOMMIT, [-2],[dtrace_errstr(Reason)]), - riak_kv_stat:update(postcommit_fail), - lager:debug("Post-commit hook ~p:~p failed with reason ~p", - [M, F, Reason]); - {'EXIT', _, _, Class, Ex} -> - ?DTRACE(?C_PUT_FSM_DECODE_POSTCOMMIT, [-3], - [dtrace_errstr({M, F, Class, Ex})]), - riak_kv_stat:update(postcommit_fail), - Stack = erlang:get_stacktrace(), - lager:debug("Problem invoking post-commit hook ~p:~p -> ~p:~p~n~p", - [M, F, Class, Ex, Stack]), - ok; - _ -> - ok - end; -decode_postcommit({error, {invalid_hook_def, Def}}) -> - ?DTRACE(?C_PUT_FSM_DECODE_POSTCOMMIT, [-4], [dtrace_errstr(Def)]), - riak_kv_stat:update(postcommit_fail), - lager:debug("Invalid post-commit hook definition ~p", [Def]). - - -get_hooks(HookType, BucketProps) -> - Hooks = proplists:get_value(HookType, BucketProps, []), - case Hooks of - <<"none">> -> - []; - Hooks when is_list(Hooks) -> - Hooks - end. - -get_option(Name, Options, Default) -> - proplists:get_value(Name, Options, Default). - -schedule_timeout(infinity) -> - undefined; -schedule_timeout(Timeout) -> - erlang:send_after(Timeout, self(), request_timeout). - -client_reply(Reply, State = #state{from = {raw, ReqId, Pid}, options = Options}) -> - State2 = add_timing(reply, State), - Reply2 = case proplists:get_value(details, Options, false) of - false -> - Reply; - [] -> - Reply; - Details -> - add_client_info(Reply, Details, State2) - end, - Pid ! {ReqId, Reply2}, - add_timing(reply, State2#state{reply = Reply}). - -add_client_info(Reply, Details, State) -> - Info = client_info(Details, State, []), - case Reply of - ok -> - {ok, Info}; - {OkError, ObjReason} -> - {OkError, ObjReason, Info} - end. - -client_info(true, StateData, Info) -> - client_info(default_details(), StateData, Info); -client_info([], _StateData, Info) -> - Info; -client_info([timing | Rest], StateData = #state{timing = Timing}, Info) -> - %% Duration is time from receiving request to responding - {ResponseUsecs, Stages} = riak_kv_fsm_timing:calc_timing(Timing), - client_info(Rest, StateData, [{response_usecs, ResponseUsecs}, - {stages, Stages} | Info]). - -default_details() -> - [timing]. - - -%% Add timing information to the state -add_timing(Stage, State = #state{timing = Timing}) -> - State#state{timing = riak_kv_fsm_timing:add_timing(Stage, Timing)}. - -atom2list(A) when is_atom(A) -> - atom_to_list(A); -atom2list(P) when is_pid(P)-> - pid_to_list(P). % eunit tests - -dtrace_errstr(Term) -> - io_lib:format("~P", [Term, 12]). - -%% =================================================================== -%% EUnit tests -%% =================================================================== --ifdef(TEST). - -make_vtag_test() -> - crypto:start(), - ?assertNot(make_vtag(now()) =:= - make_vtag(now())). - --endif. 
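For context, a minimal Erlang pre-commit hook of the shape invoke_hook/4 and
decode_precommit/1 above expect (module name and size limit are hypothetical):
the hook receives the object and must return the object, fail, or
{fail, Reason}.

    -module(my_precommit).
    -export([check_size/1]).

    %% Reject writes whose value exceeds 1 KiB; assumes a single binary value.
    check_size(RObj) ->
        case byte_size(riak_object:get_value(RObj)) =< 1024 of
            true  -> RObj;
            false -> {fail, <<"value too large">>}
        end.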
diff --git a/src/riak_kv_stat.erl.orig b/src/riak_kv_stat.erl.orig deleted file mode 100644 index 3a1750e31d..0000000000 --- a/src/riak_kv_stat.erl.orig +++ /dev/null @@ -1,383 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% riak_stat: collect, aggregate, and provide stats about the local node -%% -%% Copyright (c) 2007-2010 Basho Technologies, Inc. All Rights Reserved. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- - -%% @doc riak_kv_stat is a module for aggregating -%% stats about the Riak node on which it is runing. -%% -%% Update each stat with the exported function update/1. Add -%% a new stat to the internal stats/0 func to register a new stat with -%% folsom. -%% -%% Get the latest aggregation of stats with the exported function -%% get_stats/0. Or use folsom_metrics:get_metric_value/1, -%% or riak_core_stat_q:get_stats/1. -%% - --module(riak_kv_stat). - --behaviour(gen_server). - -%% API --export([start_link/0, get_stats/0, - update/1, register_stats/0, produce_stats/0, - leveldb_read_block_errors/0]). - --export([track_bucket/1, untrack_bucket/1]). - -%% gen_server callbacks --export([init/1, handle_call/3, handle_cast/2, handle_info/2, - terminate/2, code_change/3]). - -<<<<<<< HEAD -======= -% TODO temporarily silence warnings --export([get_put_monitor_stats/0]). ->>>>>>> master --define(SERVER, ?MODULE). --define(APP, riak_kv). - -start_link() -> - gen_server:start_link({local, ?SERVER}, ?MODULE, [], []). - -register_stats() -> - [(catch folsom_metrics:delete_metric(Stat)) || Stat <- folsom_metrics:get_metrics(), - is_tuple(Stat), element(1, Stat) == ?APP], - [register_stat(stat_name(Name), Type) || {Name, Type} <- stats()], - riak_core_stat_cache:register_app(?APP, {?MODULE, produce_stats, []}). - -%% @spec get_stats() -> proplist() -%% @doc Get the current aggregation of stats. -get_stats() -> - case riak_core_stat_cache:get_stats(?APP) of - {ok, Stats, _TS} -> - Stats; - Error -> Error - end. - -update(Arg) -> - gen_server:cast(?SERVER, {update, Arg}). - -track_bucket(Bucket) when is_binary(Bucket) -> - riak_core_bucket:set_bucket(Bucket, [{stat_tracked, true}]). - -untrack_bucket(Bucket) when is_binary(Bucket) -> - riak_core_bucket:set_bucket(Bucket, [{stat_tracked, false}]). - -%% gen_server - -init([]) -> - register_stats(), - {ok, ok}. - -handle_call(_Req, _From, State) -> - {reply, ok, State}. - -handle_cast({update, Arg}, State) -> - do_update(Arg), - {noreply, State}; -handle_cast(_Req, State) -> - {noreply, State}. - -handle_info(_Info, State) -> - {noreply, State}. - -terminate(_Reason, _State) -> - ok. - -code_change(_OldVsn, State, _Extra) -> - {ok, State}. 
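A usage note (bucket name hypothetical): per-bucket stats are opt-in via the
stat_tracked bucket property that track_bucket/1 and untrack_bucket/1 above
toggle; presumably this is what feeds the PerBucket flag consulted by
do_update/1 below.

    riak_kv_stat:track_bucket(<<"users">>).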
- -%% @doc Update the given stat -do_update({vnode_get, Idx, USecs}) -> - folsom_metrics:notify_existing_metric({?APP, vnode, gets}, 1, spiral), - create_or_update({?APP, vnode, gets, time}, USecs, histogram), - do_per_index(gets, Idx, USecs); -do_update({vnode_put, Idx, USecs}) -> - folsom_metrics:notify_existing_metric({?APP, vnode, puts}, 1, spiral), - create_or_update({?APP, vnode, puts, time}, USecs, histogram), - do_per_index(puts, Idx, USecs); -do_update(vnode_index_read) -> - folsom_metrics:notify_existing_metric({?APP, vnode, index, reads}, 1, spiral); -do_update({vnode_index_write, PostingsAdded, PostingsRemoved}) -> - folsom_metrics:notify_existing_metric({?APP, vnode, index, writes}, 1, spiral), - folsom_metrics:notify_existing_metric({?APP, vnode, index, writes, postings}, PostingsAdded, spiral), - folsom_metrics:notify_existing_metric({?APP, vnode, index, deletes, postings}, PostingsRemoved, spiral); -do_update({vnode_index_delete, Postings}) -> - folsom_metrics:notify_existing_metric({?APP, vnode, index, deletes}, Postings, spiral), - folsom_metrics:notify_existing_metric({?APP, vnode, index, deletes, postings}, Postings, spiral); -do_update({get_fsm, Bucket, Microsecs, Stages, undefined, undefined, PerBucket}) -> - folsom_metrics:notify_existing_metric({?APP, node, gets}, 1, spiral), - folsom_metrics:notify_existing_metric({?APP, node, gets, time}, Microsecs, histogram), - do_stages([?APP, node, gets, time], Stages), - do_get_bucket(PerBucket, {Bucket, Microsecs, Stages, undefined, undefined}); -do_update({get_fsm, Bucket, Microsecs, Stages, NumSiblings, ObjSize, PerBucket}) -> - folsom_metrics:notify_existing_metric({?APP, node, gets}, 1, spiral), - folsom_metrics:notify_existing_metric({?APP, node, gets, time}, Microsecs, histogram), - folsom_metrics:notify_existing_metric({?APP, node, gets, siblings}, NumSiblings, histogram), - folsom_metrics:notify_existing_metric({?APP, node, gets, objsize}, ObjSize, histogram), - do_stages([?APP, node, gets, time], Stages), - do_get_bucket(PerBucket, {Bucket, Microsecs, Stages, NumSiblings, ObjSize}); -do_update({put_fsm_time, Bucket, Microsecs, Stages, PerBucket}) -> - folsom_metrics:notify_existing_metric({?APP, node, puts}, 1, spiral), - folsom_metrics:notify_existing_metric({?APP, node, puts, time}, Microsecs, histogram), - do_stages([?APP, node, puts, time], Stages), - do_put_bucket(PerBucket, {Bucket, Microsecs, Stages}); -do_update({read_repairs, Indices, Preflist}) -> - folsom_metrics:notify_existing_metric({?APP, node, gets, read_repairs}, 1, spiral), - do_repairs(Indices, Preflist); -do_update(coord_redir) -> - folsom_metrics:notify_existing_metric({?APP, node, puts, coord_redirs}, {inc, 1}, counter); -do_update(mapper_start) -> - folsom_metrics:notify_existing_metric({?APP, mapper_count}, {inc, 1}, counter); -do_update(mapper_end) -> - folsom_metrics:notify_existing_metric({?APP, mapper_count}, {dec, 1}, counter); -do_update(precommit_fail) -> - folsom_metrics:notify_existing_metric({?APP, precommit_fail}, {inc, 1}, counter); -do_update(postcommit_fail) -> - folsom_metrics:notify_existing_metric({?APP, postcommit_fail}, {inc, 1}, counter). - -%% private -%% Per index stats (by op) -do_per_index(Op, Idx, USecs) -> - IdxAtom = list_to_atom(integer_to_list(Idx)), - create_or_update({?APP, vnode, Op, IdxAtom}, 1, spiral), - create_or_update({?APP, vnode, Op, time, IdxAtom}, USecs, histogram). 
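As a usage illustration (index and timing hypothetical): a vnode get on
partition index 0 that took 250 microseconds is reported through the public
API as an asynchronous cast, which lands in the do_update/1 clause above:

    riak_kv_stat:update({vnode_get, 0, 250}).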
- -%% per bucket get_fsm stats -do_get_bucket(false, _) -> - ok; -do_get_bucket(true, {Bucket, Microsecs, Stages, NumSiblings, ObjSize}=Args) -> - case (catch folsom_metrics:notify_existing_metric({?APP, node, gets, Bucket}, 1, spiral)) of - ok -> - [folsom_metrics:notify_existing_metric({?APP, node, gets, Dimension, Bucket}, Arg, histogram) - || {Dimension, Arg} <- [{time, Microsecs}, - {siblings, NumSiblings}, - {objsize, ObjSize}], Arg /= undefined], - do_stages([?APP, node, gets, time, Bucket], Stages); - {'EXIT', _} -> - folsom_metrics:new_spiral({?APP, node, gets, Bucket}), - [register_stat({?APP, node, gets, Dimension, Bucket}, histogram) || Dimension <- [time, - siblings, - objsize]], - do_get_bucket(true, Args) - end. - -%% per bucket put_fsm stats -do_put_bucket(false, _) -> - ok; -do_put_bucket(true, {Bucket, Microsecs, Stages}=Args) -> - case (catch folsom_metrics:notify_existing_metric({?APP, node, puts, Bucket}, 1, spiral)) of - ok -> - folsom_metrics:notify_existing_metric({?APP, node, puts, time, Bucket}, Microsecs, histogram), - do_stages([?APP, node, puts, time, Bucket], Stages); - {'EXIT', _} -> - register_stat({?APP, node, puts, Bucket}, spiral), - register_stat({?APP, node, puts, time, Bucket}, histogram), - do_put_bucket(true, Args) - end. - -%% Path is list that provides a conceptual path to a stat -%% folsom uses the tuple as flat name -%% but some ets query magic means we can get stats by APP, Stat, DimensionX -%% Path, then is a list like [?APP, StatName] -%% Both get and put fsm have a list of {state, microseconds} -%% that they provide for stats. -%% Use the state to append to the stat "path" to create a further dimension on the stat -do_stages(_Path, []) -> - ok; -do_stages(Path, [{Stage, Time}|Stages]) -> - create_or_update(list_to_tuple(Path ++ [Stage]), Time, histogram), - do_stages(Path, Stages). - -%% create dimensioned stats for read repairs. -%% The indexes are from get core [{Index, Reason::notfound|outofdate}] -%% preflist is a preflist of [{{Index, Node}, Type::primary|fallback}] -do_repairs(Indices, Preflist) -> - lists:foreach(fun({{Idx, Node}, Type}) -> - case proplists:get_value(Idx, Indices) of - undefined -> - ok; - Reason -> - create_or_update({?APP, node, gets, read_repairs, Node, Type, Reason}, 1, spiral) - end - end, - Preflist). - -%% for dynamically created / dimensioned stats -%% that can't be registered at start up -create_or_update(Name, UpdateVal, Type) -> - case (catch folsom_metrics:notify_existing_metric(Name, UpdateVal, Type)) of - ok -> - ok; - {'EXIT', _} -> - register_stat(Name, Type), - create_or_update(Name, UpdateVal, Type) - end. - -%% Stats are namespaced by APP in folsom -%% so that we don't need to co-ordinate on naming -%% between apps. -stat_name(Name) when is_list(Name) -> - list_to_tuple([?APP] ++ Name); -stat_name(Name) when is_atom(Name) -> - {?APP, Name}. 
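A worked example of the path scheme described above (the stage name is
hypothetical): a put-FSM timing for stage waiting_vnode under the path
[riak_kv, node, puts, time] ends up under the flat folsom key

    list_to_tuple([riak_kv, node, puts, time] ++ [waiting_vnode]).
    %% => {riak_kv,node,puts,time,waiting_vnode}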
- -%% @doc list of {Name, Type} for static -%% stats that we can register at start up -stats() -> - [{[vnode, gets], spiral}, - {[vnode, gets, time], histogram}, - {[vnode, puts], spiral}, - {[vnode, puts, time], histogram}, - {[vnode, index, reads], spiral}, - {[vnode, index ,writes], spiral}, - {[vnode, index, writes, postings], spiral}, - {[vnode, index, deletes], spiral}, - {[vnode, index, deletes, postings], spiral}, - {[node, gets], spiral}, - {[node, gets, siblings], histogram}, - {[node, gets, objsize], histogram}, - {[node, gets, time], histogram}, - {[node, puts], spiral}, - {[node, puts, time], histogram}, - {[node, gets, read_repairs], spiral}, - {[node, puts, coord_redirs], counter}, -<<<<<<< HEAD -======= - {[node, puts, active], counter}, - {[node, gets, active], counter}, - {[node, puts, errors], spiral}, - {[node, gets, errors], spiral}, ->>>>>>> master - {mapper_count, counter}, - {precommit_fail, counter}, - {postcommit_fail, counter}, - {[vnode, backend, leveldb, read_block_error], - {function, {function, ?MODULE, leveldb_read_block_errors}}}]. - -%% @doc register a stat with folsom -register_stat(Name, spiral) -> - folsom_metrics:new_spiral(Name); -register_stat(Name, counter) -> - folsom_metrics:new_counter(Name); -register_stat(Name, histogram) -> - %% get the global default histo type - {SampleType, SampleArgs} = get_sample_type(Name), - folsom_metrics:new_histogram(Name, SampleType, SampleArgs); -register_stat(Name, {function, F}) -> - %% store the function in a gauge metric - folsom_metrics:new_gauge(Name), - folsom_metrics:notify({Name, F}). - -%% @doc the histogram sample type may be set in app.config -%% use key `stat_sample_type' in the `riak_kv' section. Or the -%% name of an `histogram' stat. -%% Check the folsom homepage for available types. -%% Defaults to `{slide_uniform, {60, 1028}}' (a uniform sliding window -%% of 60 seconds, with a uniform sample of at most 1028 entries) -get_sample_type(Name) -> - SampleType0 = app_helper:get_env(riak_kv, stat_sample_type, {slide_uniform, {60, 1028}}), - app_helper:get_env(riak_kv, Name, SampleType0). - -%% @doc produce the legacy blob of stats for display. -produce_stats() -> - riak_kv_stat_bc:produce_stats(). - -%% @doc get the leveldb.ReadBlockErrors counter. -%% non-zero values mean it is time to consider replacing -%% this nodes disk. -leveldb_read_block_errors() -> - %% level stats are per node - %% but the way to get them is - %% is with riak_kv_vnode:vnode_status/1 - %% for that reason just chose a partition - %% on this node at random - %% and ask for it's stats - {ok, R} = riak_core_ring_manager:get_my_ring(), - Indices = riak_core_ring:my_indices(R), - Nth = crypto:rand_uniform(1, length(Indices)), - Idx = lists:nth(Nth, Indices), - PList = [{Idx, node()}], - [{Idx, [Status]}] = riak_kv_vnode:vnode_status(PList), - leveldb_read_block_errors(Status). - -leveldb_read_block_errors({backend_status, riak_kv_eleveldb_backend, Status}) -> - rbe_val(proplists:get_value(read_block_error, Status)); -leveldb_read_block_errors({backend_status, riak_kv_multi_backend, Statuses}) -> - multibackend_read_block_errors(Statuses, undefined); -leveldb_read_block_errors(_) -> - undefined. - -<<<<<<< HEAD -======= -get_put_monitor_stats() -> - GPStats = riak_kv_get_put_monitor:all_stats(), - get_put_monitor_stats(GPStats). - -get_put_monitor_stats(Stats) -> - get_put_monitor_stats(Stats, []). 
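For reference, an app.config fragment (showing the documented default) that
sets the histogram sample type read by get_sample_type/1 above; per the code,
a per-stat override can be given under the stat's own name as the key:

    {riak_kv, [
        {stat_sample_type, {slide_uniform, {60, 1028}}}
    ]}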
- -get_put_monitor_stats([], Acc) -> - lists:reverse(Acc); - -get_put_monitor_stats([{Key, Val} | Tail], Acc) when is_list(Val) -> - BaseKey = lists:nthtail(1, tuple_to_list(Key)), - Stats = [{get_put_monitor_stats_join(BaseKey ++ [SubKey]), SubVal} - || {SubKey, SubVal} <- Val], - get_put_monitor_stats(Tail, Stats ++ Acc); - -get_put_monitor_stats([{Key, Val} | Tail], Acc) -> - BaseKey = lists:nthtail(1, tuple_to_list(Key)), - Stat = {get_put_monitor_stats_join(BaseKey), Val}, - get_put_monitor_stats(Tail, [Stat | Acc]). - -get_put_monitor_stats_join(Parts) -> - get_put_monitor_stats_join(Parts, []). - -get_put_monitor_stats_join([Part | Tail], []) -> - get_put_monitor_stats_join(Tail, [Part]); - -get_put_monitor_stats_join([], Acc) -> - Joined = lists:reverse(Acc), - Stringy = [atom_to_list(X) || X <- Joined], - list_to_atom(lists:flatten(Stringy)); - -get_put_monitor_stats_join([Part | Tail], Acc) -> - get_put_monitor_stats_join(Tail, [Part, '_' | Acc]). - ->>>>>>> master -multibackend_read_block_errors([], Val) -> - rbe_val(Val); -multibackend_read_block_errors([{_Name, Status}|Rest], undefined) -> - RBEVal = case proplists:get_value(mod, Status) of - riak_kv_eleveldb_backend -> - proplists:get_value(read_block_error, Status); - _ -> undefined - end, - multibackend_read_block_errors(Rest, RBEVal); -multibackend_read_block_errors(_, Val) -> - rbe_val(Val). - -rbe_val(undefined) -> - undefined; -rbe_val(Bin) -> - list_to_integer(binary_to_list(Bin)). diff --git a/src/riak_kv_stat_bc.erl.orig b/src/riak_kv_stat_bc.erl.orig deleted file mode 100644 index f0aa176c22..0000000000 --- a/src/riak_kv_stat_bc.erl.orig +++ /dev/null @@ -1,417 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% riak_kv_stat_bc: backwards compatible stats module. Maps new folsom stats -%% to legacy riak_kv stats. -%% -%% Copyright (c) 2007-2010 Basho Technologies, Inc. All Rights Reserved. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- - -%% @doc riak_kv_stat_bc is a module that maps the new riak_kv_stats metrics -%% to the old set of stats. It exists to maintain backwards compatibility for -%% those using the `/stats` endpoint and `riak-admin status`. This module -%% should be considered soon to be deprecated and temporary. -%% -%% Legacy stats: -%%
-%%<dl><dt> vnode_gets
-%%</dt><dd> Total number of gets handled by all vnodes on this node
-%%          in the last minute.
-%%</dd>
-%%<dt> vnode_puts
-%%</dt><dd> Total number of puts handled by all vnodes on this node
-%%          in the last minute.
-%%</dd>
-%%<dt> vnode_index_reads
-%%</dt><dd> The number of index reads handled by all vnodes on this node.
-%%          Each query counts as an index read.
-%%</dd>
-%%<dt> vnode_index_writes
-%%</dt><dd> The number of batched writes handled by all vnodes on this node.
-%%</dd>
-%%<dt> vnode_index_writes_postings
-%%</dt><dd> The number of postings written to all vnodes on this node.
-%%</dd>
-%%<dt> vnode_index_deletes
-%%</dt><dd> The number of batched writes handled by all vnodes on this node.
-%%          update({vnode_index_delete, PostingsRemoved})
-%%</dd>
-%%<dt> vnode_index_deletes_postings
-%%</dt><dd> The number of postings written to all vnodes on this node.
-%%</dd>
-%%<dt> node_gets
-%%</dt><dd> Number of gets coordinated by this node in the last
-%%          minute.
-%%</dd>
-%%<dt> node_get_fsm_siblings
-%%</dt><dd> Stats about number of siblings per object in the last minute.
-%%</dd>
-%%<dt> node_get_fsm_objsize
-%%</dt><dd> Stats about object size over the last minute. The object
-%%          size is an estimate calculated by summing the size of the
-%%          bucket name, key name, and serialized vector clock, plus
-%%          the value and serialized metadata of each sibling.
-%%</dd>
-%%<dt> node_get_fsm_time_mean
-%%</dt><dd> Mean time, in microseconds, between when a riak_kv_get_fsm is
-%%          started and when it sends a reply to the client, for the
-%%          last minute.
-%%</dd>
-%%<dt> node_get_fsm_time_median
-%%</dt><dd> Median time, in microseconds, between when a riak_kv_get_fsm
-%%          is started and when it sends a reply to the client, for
-%%          the last minute.
-%%</dd>
-%%<dt> node_get_fsm_time_95
-%%</dt><dd> Response time, in microseconds, met or beaten by 95% of
-%%          riak_kv_get_fsm executions.
-%%</dd>
-%%<dt> node_get_fsm_time_99
-%%</dt><dd> Response time, in microseconds, met or beaten by 99% of
-%%          riak_kv_get_fsm executions.
-%%</dd>
-%%<dt> node_get_fsm_time_100
-%%</dt><dd> Response time, in microseconds, met or beaten by 100% of
-%%          riak_kv_get_fsm executions.
-%%</dd>
-%%<dt> node_puts
-%%</dt><dd> Number of puts coordinated by this node in the last
-%%          minute.
-%%</dd>
-%%<dt> node_put_fsm_time_mean
-%%</dt><dd> Mean time, in microseconds, between when a riak_kv_put_fsm is
-%%          started and when it sends a reply to the client, for the
-%%          last minute.
-%%</dd>
-%%<dt> node_put_fsm_time_median
-%%</dt><dd> Median time, in microseconds, between when a riak_kv_put_fsm
-%%          is started and when it sends a reply to the client, for
-%%          the last minute.
-%%</dd>
-%%<dt> node_put_fsm_time_95
-%%</dt><dd> Response time, in microseconds, met or beaten by 95% of
-%%          riak_kv_put_fsm executions.
-%%</dd>
-%%<dt> node_put_fsm_time_99
-%%</dt><dd> Response time, in microseconds, met or beaten by 99% of
-%%          riak_kv_put_fsm executions.
-%%</dd>
-%%<dt> node_put_fsm_time_100
-%%</dt><dd> Response time, in microseconds, met or beaten by 100% of
-%%          riak_kv_put_fsm executions.
-%%</dd>
-%%<dt> cpu_nprocs
-%%</dt><dd> Value returned by {@link cpu_sup:nprocs/0}.
-%%</dd>
-%%<dt> cpu_avg1
-%%</dt><dd> Value returned by {@link cpu_sup:avg1/0}.
-%%</dd>
-%%<dt> cpu_avg5
-%%</dt><dd> Value returned by {@link cpu_sup:avg5/0}.
-%%</dd>
-%%<dt> cpu_avg15
-%%</dt><dd> Value returned by {@link cpu_sup:avg15/0}.
-%%</dd>
-%%<dt> mem_total
-%%</dt><dd> The first element of the tuple returned by
-%%          {@link memsup:get_memory_data/0}.
-%%</dd>
-%%<dt> mem_allocated
-%%</dt><dd> The second element of the tuple returned by
-%%          {@link memsup:get_memory_data/0}.
-%%</dd>
-%%<dt> disk
-%%</dt><dd> Value returned by {@link disksup:get_disk_data/0}.
-%%</dd>
-%%<dt> pbc_connects_total
-%%</dt><dd> Total number of pb socket connections since start
-%%</dd>
-%%<dt> pbc_active
-%%</dt><dd> Number of active pb socket connections
-%%</dd>
-%%<dt> coord_redirs_total
-%%</dt><dd> Number of puts forwarded to be coordinated on a node
-%%          in the preflist.
-%%</dd></dl>
-%% -%% --module(riak_kv_stat_bc). - --compile(export_all). - -%% @spec produce_stats(state(), integer()) -> proplist() -%% @doc Produce a proplist-formatted view of the current aggregation -%% of stats. -produce_stats() -> - lists:append( - [lists:flatten(backwards_compat(riak_core_stat_q:get_stats([riak_kv]))), - backwards_compat_pb(riak_core_stat_q:get_stats([riak_api])), - read_repair_stats(), - level_stats(), - pipe_stats(), - cpu_stats(), - mem_stats(), - disk_stats(), - system_stats(), - ring_stats(), - config_stats(), - app_stats(), - memory_stats() - ]). - -%% Stats in folsom are stored with tuples as keys, the -%% tuples mimic an hierarchical structure. To be free of legacy -%% naming constraints the new names are not simply the old names -%% with commas for underscores. Uses legacy_stat_map to generate -%% legacys stats from the new list of stats. -backwards_compat(Stats) -> - [bc_stat(Old, New, Type, Stats) || {Old, New, Type} <- legacy_stat_map()]. - -bc_stat(Old, {New, Field}, histogram_percentile, Stats) -> - Stat = proplists:get_value(New, Stats), - Percentile = proplists:get_value(percentile, Stat), - Val = proplists:get_value(Field, Percentile), - {Old, trunc(Val)}; -bc_stat(Old, {New, Field}, histogram, Stats) -> - Stat = proplists:get_value(New, Stats), - Val = proplists:get_value(Field, Stat), - {Old, trunc(Val)}; -bc_stat(Old, {New, Field}, spiral, Stats) -> - Stat = proplists:get_value(New, Stats), - Val = proplists:get_value(Field, Stat), - {Old, Val}; -bc_stat(Old, New, counter, Stats) -> - Stat = proplists:get_value(New, Stats), - {Old, Stat}. - - -%% hard coded mapping of stats to legacy format -%% There was a enough variation in the old names that a simple -%% concatenation of the elements in the new stat key would not suffice -%% applications depend on these exact legacy names. 
-legacy_stat_map() -> - [{vnode_gets, {{riak_kv, vnode, gets}, one}, spiral}, - {vnode_gets_total, {{riak_kv, vnode, gets}, count}, spiral}, - {vnode_puts, {{riak_kv, vnode, puts}, one}, spiral}, - {vnode_puts_total, {{riak_kv, vnode, puts}, count}, spiral}, - {vnode_index_reads, {{riak_kv, vnode, index, reads}, one}, spiral}, - {vnode_index_reads_total, {{riak_kv, vnode, index, reads}, count}, spiral}, - {vnode_index_writes, {{riak_kv, vnode, index, writes}, one}, spiral}, - {vnode_index_writes_total, {{riak_kv, vnode, index, writes}, count}, spiral}, - {vnode_index_writes_postings, {{riak_kv,vnode,index,writes,postings}, one}, spiral}, - {vnode_index_writes_postings_total, {{riak_kv,vnode,index,writes,postings}, count}, spiral}, - {vnode_index_deletes, {{riak_kv,vnode,index,deletes}, one}, spiral}, - {vnode_index_deletes_total, {{riak_kv,vnode,index,deletes}, count}, spiral}, - {vnode_index_deletes_postings, {{riak_kv,vnode,index,deletes,postings}, one}, spiral}, - {vnode_index_deletes_postings_total, {{riak_kv,vnode,index,deletes,postings}, count}, spiral}, - {node_gets, {{riak_kv,node,gets}, one}, spiral}, - {node_gets_total, {{riak_kv,node,gets}, count}, spiral}, - {node_get_fsm_siblings_mean, {{riak_kv,node,gets,siblings}, arithmetic_mean}, histogram}, - {node_get_fsm_siblings_median, {{riak_kv,node,gets,siblings}, median}, histogram}, - {node_get_fsm_siblings_95, {{riak_kv,node,gets,siblings}, 95}, histogram_percentile}, - {node_get_fsm_siblings_99, {{riak_kv,node,gets,siblings}, 99}, histogram_percentile}, - {node_get_fsm_siblings_100, {{riak_kv,node,gets,siblings}, max}, histogram}, - {node_get_fsm_objsize_mean, {{riak_kv,node,gets,objsize}, arithmetic_mean}, histogram}, - {node_get_fsm_objsize_median, {{riak_kv,node,gets,objsize}, median}, histogram}, - {node_get_fsm_objsize_95, {{riak_kv,node,gets,objsize}, 95}, histogram_percentile}, - {node_get_fsm_objsize_99, {{riak_kv,node,gets,objsize}, 99}, histogram_percentile}, - {node_get_fsm_objsize_100, {{riak_kv,node,gets,objsize}, max}, histogram}, - {node_get_fsm_time_mean, {{riak_kv,node,gets,time}, arithmetic_mean}, histogram}, - {node_get_fsm_time_median, {{riak_kv,node,gets,time}, median}, histogram}, - {node_get_fsm_time_95, {{riak_kv,node,gets,time}, 95}, histogram_percentile}, - {node_get_fsm_time_99, {{riak_kv,node,gets,time}, 99}, histogram_percentile}, - {node_get_fsm_time_100, {{riak_kv,node,gets,time}, max}, histogram}, - {node_puts, {{riak_kv,node, puts}, one}, spiral}, - {node_puts_total, {{riak_kv,node, puts}, count}, spiral}, - {node_put_fsm_time_mean, {{riak_kv,node, puts, time}, arithmetic_mean}, histogram}, - {node_put_fsm_time_median, {{riak_kv,node, puts, time}, median}, histogram}, - {node_put_fsm_time_95, {{riak_kv,node, puts, time}, 95}, histogram_percentile}, - {node_put_fsm_time_99, {{riak_kv,node, puts, time}, 99}, histogram_percentile}, - {node_put_fsm_time_100, {{riak_kv,node, puts, time}, max}, histogram}, - {read_repairs, {{riak_kv,node,gets,read_repairs}, one}, spiral}, - {read_repairs_total, {{riak_kv,node,gets,read_repairs}, count}, spiral}, - {coord_redirs_total, {riak_kv,node,puts,coord_redirs}, counter}, - {executing_mappers, {riak_kv,mapper_count}, counter}, - {precommit_fail, {riak_kv, precommit_fail}, counter}, - {postcommit_fail, {riak_kv, postcommit_fail}, counter} - ]. - -%% PB stats are now under riak_api. In the past they were part of riak_kv. -%% This function maps those new values to the old names. 
-backwards_compat_pb(Stats) -> - [bc_stat(Old, New, Type, Stats) || {Old, New, Type} <- - [{pbc_active, {riak_api, pbc_connects, active}, counter}, - {pbc_connects, {{riak_api, pbc_connects}, one}, spiral}, - {pbc_connects_total, {{riak_api, pbc_connects}, count}, spiral}]]. - -%% @spec cpu_stats() -> proplist() -%% @doc Get stats on the cpu, as given by the cpu_sup module -%% of the os_mon application. -cpu_stats() -> - [{cpu_nprocs, cpu_sup:nprocs()}, - {cpu_avg1, cpu_sup:avg1()}, - {cpu_avg5, cpu_sup:avg5()}, - {cpu_avg15, cpu_sup:avg15()}]. - -%% @spec mem_stats() -> proplist() -%% @doc Get stats on the memory, as given by the memsup module -%% of the os_mon application. -mem_stats() -> - {Total, Alloc, _} = memsup:get_memory_data(), - [{mem_total, Total}, - {mem_allocated, Alloc}]. - -%% @spec disk_stats() -> proplist() -%% @doc Get stats on the disk, as given by the disksup module -%% of the os_mon application. -disk_stats() -> - [{disk, disksup:get_disk_data()}]. - -system_stats() -> - [{nodename, node()}, - {connected_nodes, nodes()}, - {sys_driver_version, list_to_binary(erlang:system_info(driver_version))}, -<<<<<<< HEAD - {sys_global_heaps_size, erlang:system_info(global_heaps_size)}, -======= - {sys_global_heaps_size, safe_global_heap_size()}, ->>>>>>> master - {sys_heap_type, erlang:system_info(heap_type)}, - {sys_logical_processors, erlang:system_info(logical_processors)}, - {sys_otp_release, list_to_binary(erlang:system_info(otp_release))}, - {sys_process_count, erlang:system_info(process_count)}, - {sys_smp_support, erlang:system_info(smp_support)}, - {sys_system_version, list_to_binary(string:strip(erlang:system_info(system_version), right, $\n))}, - {sys_system_architecture, list_to_binary(erlang:system_info(system_architecture))}, - {sys_threads_enabled, erlang:system_info(threads)}, - {sys_thread_pool_size, erlang:system_info(thread_pool_size)}, - {sys_wordsize, erlang:system_info(wordsize)}]. - -<<<<<<< HEAD -======= -safe_global_heap_size() -> - try erlang:system_info(global_heaps_size) of - N -> N - catch - error:badarg -> - deprecated - end. - ->>>>>>> master -app_stats() -> - [{list_to_atom(atom_to_list(A) ++ "_version"), list_to_binary(V)} - || {A,_,V} <- application:which_applications()]. - -memory_stats() -> - [{list_to_atom("memory_" ++ atom_to_list(K)), V} || {K,V} <- erlang:memory()]. - -ring_stats() -> - {ok, R} = riak_core_ring_manager:get_my_ring(), - [{ring_members, riak_core_ring:all_members(R)}, - {ring_num_partitions, riak_core_ring:num_partitions(R)}, - {ring_ownership, list_to_binary(lists:flatten(io_lib:format("~p", [dict:to_list( - lists:foldl(fun({_P, N}, Acc) -> - case dict:find(N, Acc) of - {ok, V} -> - dict:store(N, V+1, Acc); - error -> - dict:store(N, 1, Acc) - end - end, dict:new(), riak_core_ring:all_owners(R)))])))}]. - - -config_stats() -> - [{ring_creation_size, app_helper:get_env(riak_core, ring_creation_size)}, - {storage_backend, app_helper:get_env(riak_kv, storage_backend)}]. - -%% @doc add the pipe stats to the blob in a style consistent -%% with those stats already in the blob -pipe_stats() -> - Stats = riak_core_stat_q:get_stats([riak_pipe]), - lists:flatten([bc_stat(Name, Val) || {Name, Val} <- Stats]). - -%% old style blob stats don't have the app name -%% and they have underscores, not commas -bc_stat(Name, Val) -> - StatName = join(tl(tuple_to_list(Name))), - bc_stat_val(StatName, Val). 
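For illustration (numbers hypothetical), here is one row of legacy_stat_map/0
being resolved by the bc_stat/4 clauses above. The entry
{node_get_fsm_time_95, {{riak_kv,node,gets,time}, 95}, histogram_percentile}
looks up the folsom histogram in the new-style stats proplist, takes its
percentile proplist, and truncates the 95th-percentile reading:

    Stat = proplists:get_value({riak_kv,node,gets,time}, Stats),
    Pct  = proplists:get_value(percentile, Stat),
    {node_get_fsm_time_95, trunc(proplists:get_value(95, Pct))}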
-
-%% Old style stats don't have tuple lists as values
-%% they have an entry per element in the complex stats tuple list
-%% so a spiral with both a count and a one minute reading
-%% would be two stats, of NAME_count and NAME_one
-%% let's do that
-bc_stat_val(StatName, Val) when is_list(Val) ->
-    [{join([StatName, ValName]), ValVal} || {ValName, ValVal} <- Val];
-bc_stat_val(StatName, Val) ->
-    {StatName, Val}.
-
-%% Leveldb stats are a last minute new edition to the blob
-level_stats() ->
-    Stats = riak_core_stat_q:get_stats([riak_kv, vnode, backend, leveldb, read_block_error]),
-    [{join(lists:nthtail(3, tuple_to_list(Name))), Val} || {Name, Val} <- Stats].
-
-%% Read repair stats are a new edition to the legacy blob.
-%% Added to the blob since the stat query interface was not ready for the 1.3
-%% release.
-%% The read repair stats are stored as dimensions with
-%% the key {riak_kv, node, gets, read_repairs, Node, Type, Reason}.
-%% The CSEs are only interested in aggregations of Type and Reason
-%% which are elements 6 and 7 in the key.
-read_repair_stats() ->
-    aggregate(read_repairs, [riak_kv, node, gets, read_repairs, '_', '_', '_'], [6,7]).
-
-%% TODO generalise for riak_core_stat_q
-%% aggregates spiral values for stats retrieved by `Query'
-%% aggregates by the key field(s) indexed at `Fields'
-%% produces a flat list of `BaseName_NameOfFieldAtIndex[_count]'
-%% to fit in with the existing naming convention in the legacy stat blob
-aggregate(BaseName, Query, Fields) ->
-    Stats = riak_core_stat_q:get_stats(Query),
-    Aggregates = do_aggregate(Stats, Fields),
-    FlatStats = flatten_aggregate_stats(BaseName, Aggregates),
-    lists:flatten(FlatStats).
-
-do_aggregate(Stats, Fields) ->
-    lists:foldl(fun({Name, [{count, C0}, {one, O0}]}, Acc) ->
-                        Key = key_from_fields(Name, Fields),
-                        [{count, C}, {one, O}] = case orddict:find(Key, Acc) of
-                                                     error -> [{count, 0}, {one, 0}];
-                                                     {ok, V} -> V
-                                                 end,
-                        orddict:store(Key, [{count, C+C0}, {one, O+O0}], Acc)
-                end,
-                orddict:new(),
-                Stats).
-
-%% Generate a dictionary key for the running
-%% aggregation using key `Name' elements at index(es)
-%% in `Fields'
-key_from_fields(Name, Fields) ->
-    Key = [element(N, Name) || N <- Fields],
-    join(Key).
-
-%% Folds over the aggregate nested dictionaries to create
-%% a flat list of stats whose names are made by
-%% joining key names to `BaseName'
-flatten_aggregate_stats(BaseName, Aggregates) ->
-    orddict:fold(fun(K, V, Acc) when not is_list(V) ->
-                         [{join([BaseName, K]), V}|Acc];
-                    (K, V, Acc) ->
-                         [flatten_aggregate_stats(join([BaseName, K]), V)|Acc]
-                 end,
-                 [],
-                 Aggregates).
-
-%% Join a list of atoms into a single atom
-%% with elements separated by '_'
-join(L) ->
-    join(L, <<>>).
-
-join([], Bin) ->
-    binary_to_atom(Bin, latin1);
-join([Atom|Rest], <<>>) ->
-    Bin2 = atom_to_binary(Atom, latin1),
-    join(Rest, <<Bin2/binary>>);
-join([Atom|Rest], Bin) ->
-    Bin2 = atom_to_binary(Atom, latin1),
-    join(Rest, <<Bin/binary, $_, Bin2/binary>>).
diff --git a/src/riak_kv_sup.erl.orig b/src/riak_kv_sup.erl.orig
deleted file mode 100644
index 2c95ceab26..0000000000
--- a/src/riak_kv_sup.erl.orig
+++ /dev/null
@@ -1,139 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% riak_sup: supervise the core Riak services
-%%
-%% Copyright (c) 2007-2010 Basho Technologies, Inc. All Rights Reserved.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License.
You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- - -%% @doc supervise the core Riak services - --module(riak_kv_sup). - --include_lib("riak_kv_js_pools.hrl"). - --behaviour(supervisor). - --export([start_link/0]). --export([init/1]). - --define (IF (Bool, A, B), if Bool -> A; true -> B end). - -%% @spec start_link() -> ServerRet -%% @doc API for starting the supervisor. -start_link() -> - supervisor:start_link({local, ?MODULE}, ?MODULE, []). - -%% @spec init([]) -> SupervisorTree -%% @doc supervisor callback. -init([]) -> - catch dtrace:init(), % NIF load trigger (R14B04) - catch dyntrace:p(), % NIF load trigger (R15B01+) - VMaster = {riak_kv_vnode_master, - {riak_core_vnode_master, start_link, - [riak_kv_vnode, riak_kv_legacy_vnode, riak_kv]}, - permanent, 5000, worker, [riak_core_vnode_master]}, - MapJSPool = {?JSPOOL_MAP, - {riak_kv_js_manager, start_link, - [?JSPOOL_MAP, read_js_pool_size(map_js_vm_count, "map")]}, - permanent, 30000, worker, [riak_kv_js_manager]}, - ReduceJSPool = {?JSPOOL_REDUCE, - {riak_kv_js_manager, start_link, - [?JSPOOL_REDUCE, read_js_pool_size(reduce_js_vm_count, "reduce")]}, - permanent, 30000, worker, [riak_kv_js_manager]}, - HookJSPool = {?JSPOOL_HOOK, - {riak_kv_js_manager, start_link, - [?JSPOOL_HOOK, read_js_pool_size(hook_js_vm_count, "hook callback")]}, - permanent, 30000, worker, [riak_kv_js_manager]}, - JSSup = {riak_kv_js_sup, - {riak_kv_js_sup, start_link, []}, - permanent, infinity, supervisor, [riak_kv_js_sup]}, - GetFsmSup = {riak_kv_get_fsm_sup, - {riak_kv_get_fsm_sup, start_link, []}, - permanent, infinity, supervisor, [riak_kv_get_fsm_sup]}, - PutFsmSup = {riak_kv_put_fsm_sup, - {riak_kv_put_fsm_sup, start_link, []}, - permanent, infinity, supervisor, [riak_kv_put_fsm_sup]}, - DeleteSup = {riak_kv_delete_sup, - {riak_kv_delete_sup, start_link, []}, - permanent, infinity, supervisor, [riak_kv_delete_sup]}, - BucketsFsmSup = {riak_kv_buckets_fsm_sup, - {riak_kv_buckets_fsm_sup, start_link, []}, - permanent, infinity, supervisor, [riak_kv_buckets_fsm_sup]}, - KeysFsmSup = {riak_kv_keys_fsm_sup, - {riak_kv_keys_fsm_sup, start_link, []}, - permanent, infinity, supervisor, [riak_kv_keys_fsm_sup]}, - IndexFsmSup = {riak_kv_index_fsm_sup, - {riak_kv_index_fsm_sup, start_link, []}, - permanent, infinity, supervisor, [riak_kv_index_fsm_sup]}, - SinkFsmSup = {riak_kv_mrc_sink_sup, - {riak_kv_mrc_sink_sup, start_link, []}, - permanent, infinity, supervisor, [riak_kv_mrc_sink_sup]}, -<<<<<<< HEAD -======= - GetPutMonitor = {riak_kv_get_put_monitor, - {riak_kv_get_put_monitor, start_link, []}, - permanent, 5000, worker, [riak_kv_get_put_monitor]}, - - EntropyManager = {riak_kv_entropy_manager, - {riak_kv_entropy_manager, start_link, []}, - permanent, 30000, worker, [riak_kv_entropy_manager]}, ->>>>>>> master - - % Figure out which processes we should run... - HasStorageBackend = (app_helper:get_env(riak_kv, storage_backend) /= undefined), - - % Build the process list... 
- Processes = lists:flatten([ - ?IF(HasStorageBackend, VMaster, []), - GetFsmSup, - PutFsmSup, - DeleteSup, - SinkFsmSup, - BucketsFsmSup, - KeysFsmSup, - IndexFsmSup, -<<<<<<< HEAD -======= - EntropyManager, - GetPutMonitor, ->>>>>>> master - JSSup, - MapJSPool, - ReduceJSPool, - HookJSPool - ]), - - % Run the proesses... - {ok, {{one_for_one, 10, 10}, Processes}}. - -%% Internal functions -read_js_pool_size(Entry, PoolType) -> - case app_helper:get_env(riak_kv, Entry, undefined) of - undefined -> - OldSize = app_helper:get_env(riak_kv, js_vm_count, 0), - lager:warning("js_vm_count has been deprecated. " - "Please use ~p to configure the ~s pool.", [Entry, PoolType]), - case OldSize > 8 of - true -> - OldSize div 3; - false -> - OldSize - end; - Size -> - Size - end. diff --git a/src/riak_kv_util.erl.orig b/src/riak_kv_util.erl.orig deleted file mode 100644 index 053a38cb93..0000000000 --- a/src/riak_kv_util.erl.orig +++ /dev/null @@ -1,266 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% riak_util: functions that are useful throughout Riak -%% -%% Copyright (c) 2007-2010 Basho Technologies, Inc. All Rights Reserved. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- - - -%% @doc Various functions that are useful throughout riak_kv. --module(riak_kv_util). - - --export([is_x_deleted/1, - obj_not_deleted/1, - try_cast/3, - fallback/4, - expand_value/3, - expand_rw_value/4, - normalize_rw_value/2, -<<<<<<< HEAD - make_request/2]). -======= - make_request/2, - get_index_n/2, - preflist_siblings/1, - responsible_preflists/1, - responsible_preflists/2]). ->>>>>>> master - --include_lib("riak_kv_vnode.hrl"). - --ifdef(TEST). --include_lib("eunit/include/eunit.hrl"). --endif. - --type riak_core_ring() :: riak_core_ring:riak_core_ring(). --type index() :: non_neg_integer(). --type index_n() :: {index(), pos_integer()}. - -%% =================================================================== -%% Public API -%% =================================================================== - -%% @spec is_x_deleted(riak_object:riak_object()) -> boolean() -%% @doc 'true' if all contents of the input object are marked -%% as deleted; 'false' otherwise -%% @equiv obj_not_deleted(Obj) == undefined -is_x_deleted(Obj) -> - case obj_not_deleted(Obj) of - undefined -> true; - _ -> false - end. - -%% @spec obj_not_deleted(riak_object:riak_object()) -> -%% undefined|riak_object:riak_object() -%% @doc Determine whether all contents of an object are marked as -%% deleted. Return is the atom 'undefined' if all contents -%% are marked deleted, or the input Obj if any of them are not. -obj_not_deleted(Obj) -> - case [{M, V, C} || {M, V, C} <- riak_object:get_contents(Obj), - dict:is_key(<<"X-Riak-Deleted">>, M) =:= false] of - [] -> undefined; - _ -> Obj - end. 
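An example app.config fragment (sizes hypothetical) using the per-pool keys
that read_js_pool_size/2 above prefers over the deprecated js_vm_count:

    {riak_kv, [
        {map_js_vm_count,    8},
        {reduce_js_vm_count, 6},
        {hook_js_vm_count,   2}
    ]}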
- -%% @spec try_cast(term(), [node()], [{Index :: term(), Node :: node()}]) -> -%% {[{Index :: term(), Node :: node(), Node :: node()}], -%% [{Index :: term(), Node :: node()}]} -%% @doc Cast {Cmd, {Index,Node}, Msg} at riak_kv_vnode_master on Node -%% if Node is in UpNodes. The list of successful casts is the -%% first element of the return tuple, and the list of unavailable -%% nodes is the second element. Used in riak_kv_put_fsm and riak_kv_get_fsm. -try_cast(Msg, UpNodes, Targets) -> - try_cast(Msg, UpNodes, Targets, [], []). -try_cast(_Msg, _UpNodes, [], Sent, Pangs) -> {Sent, Pangs}; -try_cast(Msg, UpNodes, [{Index,Node}|Targets], Sent, Pangs) -> - case lists:member(Node, UpNodes) of - false -> - try_cast(Msg, UpNodes, Targets, Sent, [{Index,Node}|Pangs]); - true -> - gen_server:cast({riak_kv_vnode_master, Node}, make_request(Msg, Index)), - try_cast(Msg, UpNodes, Targets, [{Index,Node,Node}|Sent],Pangs) - end. - -%% @spec fallback(term(), term(), [{Index :: term(), Node :: node()}], -%% [{any(), Fallback :: node()}]) -> -%% [{Index :: term(), Node :: node(), Fallback :: node()}] -%% @doc Cast {Cmd, {Index,Node}, Msg} at a node in the Fallbacks list -%% for each node in the Pangs list. Pangs should have come -%% from the second element of the response tuple of a call to -%% try_cast/3. -%% Used in riak_kv_put_fsm and riak_kv_get_fsm - -fallback(Cmd, UpNodes, Pangs, Fallbacks) -> - fallback(Cmd, UpNodes, Pangs, Fallbacks, []). -fallback(_Cmd, _UpNodes, [], _Fallbacks, Sent) -> Sent; -fallback(_Cmd, _UpNodes, _Pangs, [], Sent) -> Sent; -fallback(Cmd, UpNodes, [{Index,Node}|Pangs], [{_,FN}|Fallbacks], Sent) -> - case lists:member(FN, UpNodes) of - false -> fallback(Cmd, UpNodes, [{Index,Node}|Pangs], Fallbacks, Sent); - true -> - gen_server:cast({riak_kv_vnode_master, FN}, make_request(Cmd, Index)), - fallback(Cmd, UpNodes, Pangs, Fallbacks, [{Index,Node,FN}|Sent]) - end. - - --spec make_request(vnode_req(), partition()) -> #riak_vnode_req_v1{}. -make_request(Request, Index) -> - riak_core_vnode_master:make_request(Request, - {fsm, undefined, self()}, - Index). - -get_bucket_option(Type, BucketProps) -> - case proplists:get_value(Type, BucketProps, default) of - default -> - {ok, DefaultProps} = application:get_env(riak_core, default_bucket_props), - proplists:get_value(Type, DefaultProps, error); - Val -> Val - end. - -expand_value(Type, default, BucketProps) -> - get_bucket_option(Type, BucketProps); -expand_value(_Type, Value, _BucketProps) -> - Value. - -expand_rw_value(Type, default, BucketProps, N) -> - normalize_rw_value(get_bucket_option(Type, BucketProps), N); -expand_rw_value(_Type, Val, _BucketProps, N) -> - normalize_rw_value(Val, N). - -normalize_rw_value(RW, _N) when is_integer(RW) -> RW; -normalize_rw_value(RW, N) when is_binary(RW) -> - try - ExistingAtom = binary_to_existing_atom(RW, utf8), - normalize_rw_value(ExistingAtom, N) - catch _:badarg -> - error - end; -normalize_rw_value(one, _N) -> 1; -normalize_rw_value(quorum, N) -> erlang:trunc((N/2)+1); -normalize_rw_value(all, N) -> N; -normalize_rw_value(_, _) -> error. - -<<<<<<< HEAD -======= -%% =================================================================== -%% Preflist utility functions -%% =================================================================== - -%% @doc Given a bucket/key, determine the associated preflist index_n. --spec get_index_n({binary(), binary()}, riak_core_ring()) -> index_n(). 
-get_index_n({Bucket, Key}, Ring) ->
-    BucketProps = riak_core_bucket:get_bucket(Bucket, Ring),
-    N = proplists:get_value(n_val, BucketProps),
-    ChashKey = riak_core_util:chash_key({Bucket, Key}),
-    Index = riak_core_ring:responsible_index(ChashKey, Ring),
-    {Index, N}.
-
-%% @doc Given an index, determine all sibling indices that participate in one
-%%      or more preflists with the specified index.
--spec preflist_siblings(index()) -> [index()].
-preflist_siblings(Index) ->
-    {ok, Ring} = riak_core_ring_manager:get_my_ring(),
-    preflist_siblings(Index, Ring).
-
-%% @doc See {@link preflist_siblings/1}.
--spec preflist_siblings(index(), riak_core_ring()) -> [index()].
-preflist_siblings(Index, Ring) ->
-    MaxN = determine_max_n(Ring),
-    preflist_siblings(Index, MaxN, Ring).
-
--spec preflist_siblings(index(), pos_integer(), riak_core_ring()) -> [index()].
-preflist_siblings(Index, N, Ring) ->
-    IndexBin = <<Index:160/integer>>,
-    PL = riak_core_ring:preflist(IndexBin, Ring),
-    Indices = [Idx || {Idx, _} <- PL],
-    RevIndices = lists:reverse(Indices),
-    {Succ, _} = lists:split(N-1, Indices),
-    {Pred, _} = lists:split(N-1, tl(RevIndices)),
-    lists:reverse(Pred) ++ Succ.
-
--spec responsible_preflists(index()) -> [index_n()].
-responsible_preflists(Index) ->
-    {ok, Ring} = riak_core_ring_manager:get_my_ring(),
-    responsible_preflists(Index, Ring).
-
--spec responsible_preflists(index(), riak_core_ring()) -> [index_n()].
-responsible_preflists(Index, Ring) ->
-    AllN = determine_all_n(Ring),
-    responsible_preflists(Index, AllN, Ring).
-
--spec responsible_preflists(index(), [pos_integer(),...], riak_core_ring())
-                           -> [index_n()].
-responsible_preflists(Index, AllN, Ring) ->
-    IndexBin = <<Index:160/integer>>,
-    PL = riak_core_ring:preflist(IndexBin, Ring),
-    Indices = [Idx || {Idx, _} <- PL],
-    RevIndices = lists:reverse(Indices),
-    lists:flatmap(fun(N) ->
-                          responsible_preflists_n(RevIndices, N)
-                  end, AllN).
-
--spec responsible_preflists_n([index()], pos_integer()) -> [index_n()].
-responsible_preflists_n(RevIndices, N) ->
-    {Pred, _} = lists:split(N, RevIndices),
-    [{Idx, N} || Idx <- lists:reverse(Pred)].
-
--spec determine_max_n(riak_core_ring()) -> pos_integer().
-determine_max_n(Ring) ->
-    lists:max(determine_all_n(Ring)).
-
--spec determine_all_n(riak_core_ring()) -> [pos_integer(),...].
-determine_all_n(Ring) ->
-    Buckets = riak_core_ring:get_buckets(Ring),
-    BucketProps = [riak_core_bucket:get_bucket(Bucket, Ring) || Bucket <- Buckets],
-    Default = app_helper:get_env(riak_core, default_bucket_props),
-    DefaultN = proplists:get_value(n_val, Default),
-    AllN = lists:foldl(fun(Props, AllN) ->
-                               N = proplists:get_value(n_val, Props),
-                               ordsets:add_element(N, AllN)
-                       end, [DefaultN], BucketProps),
-    AllN.
-
->>>>>>> master
-%% ===================================================================
-%% EUnit tests
-%% ===================================================================
--ifdef(TEST).
-
-normalize_test() ->
-    3 = normalize_rw_value(3, 3),
-    1 = normalize_rw_value(one, 3),
-    2 = normalize_rw_value(quorum, 3),
-    3 = normalize_rw_value(all, 3),
-    1 = normalize_rw_value(<<"one">>, 3),
-    2 = normalize_rw_value(<<"quorum">>, 3),
-    3 = normalize_rw_value(<<"all">>, 3),
-    error = normalize_rw_value(garbage, 3),
-    error = normalize_rw_value(<<"garbage">>, 3).
-
-
-deleted_test() ->
-    O = riak_object:new(<<"test">>, <<"k">>, "v"),
-    false = is_x_deleted(O),
-    MD = dict:new(),
-    O1 = riak_object:apply_updates(
-           riak_object:update_metadata(
-             O, dict:store(<<"X-Riak-Deleted">>, true, MD))),
-    true = is_x_deleted(O1).
-
--endif.
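For concreteness, worked examples of the quorum arithmetic in
normalize_rw_value/2 above, for N = 5 (consistent with normalize_test/0):

    1> riak_kv_util:normalize_rw_value(quorum, 5).   %% trunc(5/2 + 1)
    3
    2> riak_kv_util:normalize_rw_value(all, 5).
    5
    3> riak_kv_util:normalize_rw_value(<<"quorum">>, 5).
    3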
diff --git a/src/riak_kv_vnode.erl.orig b/src/riak_kv_vnode.erl.orig deleted file mode 100644 index ec12413172..0000000000 --- a/src/riak_kv_vnode.erl.orig +++ /dev/null @@ -1,1651 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% riak_kv_vnode: VNode Implementation -%% -%% Copyright (c) 2007-2010 Basho Technologies, Inc. All Rights Reserved. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- --module(riak_kv_vnode). --author('Kevin Smith '). --author('John Muellerleile '). - --behaviour(riak_core_vnode). - -%% API --export([test_vnode/1, put/7]). --export([start_vnode/1, - get/3, - del/3, - put/6, - local_get/2, - local_put/2, - local_put/3, - coord_put/6, - readrepair/6, - list_keys/4, - fold/3, - get_vclocks/2, - vnode_status/1, - ack_keys/1, - repair/1, - repair_status/1, -<<<<<<< HEAD - repair_filter/1]). -======= - repair_filter/1, - hashtree_pid/1, - request_hashtree_pid/1]). ->>>>>>> master - -%% riak_core_vnode API --export([init/1, - terminate/2, - handle_command/3, - handle_coverage/4, - is_empty/1, - delete/1, - handle_handoff_command/3, - handoff_starting/2, - handoff_cancelled/1, - handoff_finished/2, - handle_handoff_data/2, - encode_handoff_item/2, - handle_exit/3, - handle_info/2]). - --include_lib("riak_kv_vnode.hrl"). --include_lib("riak_kv_map_phase.hrl"). --include_lib("riak_core/include/riak_core_pb.hrl"). - --ifdef(TEST). --include_lib("eunit/include/eunit.hrl"). --export([put_merge/6]). %% For fsm_eqc_vnode --endif. - --record(mrjob, {cachekey :: term(), - bkey :: term(), - reqid :: term(), - target :: pid()}). - --record(state, {idx :: partition(), - mod :: module(), - modstate :: term(), - mrjobs :: term(), - vnodeid :: undefined | binary(), - delete_mode :: keep | immediate | pos_integer(), - bucket_buf_size :: pos_integer(), - index_buf_size :: pos_integer(), - key_buf_size :: pos_integer(), - async_folding :: boolean(), - in_handoff = false :: boolean(), - hashtrees :: pid() }). - --type index_op() :: add | remove. --type index_value() :: integer() | binary(). --type index() :: non_neg_integer(). --type state() :: #state{}. - --record(putargs, {returnbody :: boolean(), - coord:: boolean(), - lww :: boolean(), - bkey :: {binary(), binary()}, - robj :: term(), - index_specs=[] :: [{index_op(), binary(), index_value()}], - reqid :: non_neg_integer(), - bprops :: maybe_improper_list(), - starttime :: non_neg_integer(), - prunetime :: undefined| non_neg_integer(), - is_index=false :: boolean() %% set if the b/end supports indexes - }). -<<<<<<< HEAD -======= - --spec maybe_create_hashtrees(state()) -> state(). -maybe_create_hashtrees(State) -> - maybe_create_hashtrees(riak_kv_entropy_manager:enabled(), State). - --spec maybe_create_hashtrees(boolean(), state()) -> state(). 
-maybe_create_hashtrees(false, State) -> - State; -maybe_create_hashtrees(true, State=#state{idx=Index}) -> - %% Only maintain a hashtree if a primary vnode - {ok, Ring} = riak_core_ring_manager:get_my_ring(), - case riak_core_ring:index_owner(Ring, Index) == node() of - false -> - State; - true -> - RP = riak_kv_util:responsible_preflists(Index), - case riak_kv_index_hashtree:start(Index, RP) of - {ok, Trees} -> - monitor(process, Trees), - State#state{hashtrees=Trees}; - Error -> - lager:info("riak_kv/~p: unable to start index_hashtree: ~p", - [Index, Error]), - erlang:send_after(1000, self(), retry_create_hashtree), - State#state{hashtrees=undefined} - end - end. ->>>>>>> master - -%% API -start_vnode(I) -> - riak_core_vnode_master:get_vnode_pid(I, riak_kv_vnode). - -test_vnode(I) -> - riak_core_vnode:start_link(riak_kv_vnode, I, infinity). - -get(Preflist, BKey, ReqId) -> - %% Assuming this function is called from a FSM process - %% so self() == FSM pid - get(Preflist, BKey, ReqId, {fsm, undefined, self()}). - -<<<<<<< HEAD -======= -get(Preflist, BKey, ReqId, Sender) -> - Req = ?KV_GET_REQ{bkey=BKey, - req_id=ReqId}, - riak_core_vnode_master:command(Preflist, - Req, - Sender, - riak_kv_vnode_master). - ->>>>>>> master -del(Preflist, BKey, ReqId) -> - riak_core_vnode_master:command(Preflist, - ?KV_DELETE_REQ{bkey=BKey, - req_id=ReqId}, - riak_kv_vnode_master). - -%% Issue a put for the object to the preflist, expecting a reply -%% to an FSM. -put(Preflist, BKey, Obj, ReqId, StartTime, Options) when is_integer(StartTime) -> - put(Preflist, BKey, Obj, ReqId, StartTime, Options, {fsm, undefined, self()}). - -put(Preflist, BKey, Obj, ReqId, StartTime, Options, Sender) - when is_integer(StartTime) -> - riak_core_vnode_master:command(Preflist, - ?KV_PUT_REQ{ - bkey = BKey, - object = Obj, - req_id = ReqId, - start_time = StartTime, - options = Options}, - Sender, - riak_kv_vnode_master). - -local_put(Index, Obj) -> - local_put(Index, Obj, []). - -local_put(Index, Obj, Options) -> - BKey = {riak_object:bucket(Obj), riak_object:key(Obj)}, - Ref = make_ref(), - ReqId = erlang:phash2(erlang:now()), - StartTime = riak_core_util:moment(), - Sender = {raw, Ref, self()}, - put({Index, node()}, BKey, Obj, ReqId, StartTime, Options, Sender), - receive - {Ref, Reply} -> - Reply - end. - -local_get(Index, BKey) -> - Ref = make_ref(), - ReqId = erlang:phash2(erlang:now()), - Sender = {raw, Ref, self()}, - get({Index,node()}, BKey, ReqId, Sender), - receive - {Ref, {r, Result, Index, ReqId}} -> - Result; - {Ref, Reply} -> - {error, Reply} - end. - -%% Issue a put for the object to the preflist, expecting a reply -%% to an FSM. -coord_put(IndexNode, BKey, Obj, ReqId, StartTime, Options) when is_integer(StartTime) -> - coord_put(IndexNode, BKey, Obj, ReqId, StartTime, Options, {fsm, undefined, self()}). - -coord_put(IndexNode, BKey, Obj, ReqId, StartTime, Options, Sender) - when is_integer(StartTime) -> - riak_core_vnode_master:command(IndexNode, - ?KV_PUT_REQ{ - bkey = BKey, - object = Obj, - req_id = ReqId, - start_time = StartTime, - options = [coord | Options]}, - Sender, - riak_kv_vnode_master). - -%% Do a put without sending any replies -readrepair(Preflist, BKey, Obj, ReqId, StartTime, Options) -> - put(Preflist, BKey, Obj, ReqId, StartTime, [rr | Options], ignore). - -list_keys(Preflist, ReqId, Caller, Bucket) -> - riak_core_vnode_master:command(Preflist, - #riak_kv_listkeys_req_v2{ - bucket=Bucket, - req_id=ReqId, - caller=Caller}, - ignore, - riak_kv_vnode_master). 
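An illustrative call sequence for the node-local helpers above (bucket, key,
and partition index hypothetical); local_put/3 blocks until the owning vnode
replies:

    Obj = riak_object:new(<<"b">>, <<"k">>, <<"v">>),
    Reply = riak_kv_vnode:local_put(Idx, Obj, []).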
- -fold(Preflist, Fun, Acc0) -> - riak_core_vnode_master:sync_spawn_command(Preflist, - ?FOLD_REQ{ - foldfun=Fun, - acc0=Acc0}, - riak_kv_vnode_master). - -get_vclocks(Preflist, BKeyList) -> - riak_core_vnode_master:sync_spawn_command(Preflist, - ?KV_VCLOCK_REQ{bkeys=BKeyList}, - riak_kv_vnode_master). - -%% @doc Get status information about the node local vnodes. --spec vnode_status([{partition(), pid()}]) -> [{atom(), term()}]. -vnode_status(PrefLists) -> - ReqId = erlang:phash2({self(), os:timestamp()}), - %% Get the status of each vnode - riak_core_vnode_master:command(PrefLists, - ?KV_VNODE_STATUS_REQ{}, - {raw, ReqId, self()}, - riak_kv_vnode_master), - wait_for_vnode_status_results(PrefLists, ReqId, []). - -%% @doc Repair the given `Partition'. --spec repair(partition()) -> - {ok, Pairs::[{partition(), node()}]} | - {down, Down::[{partition(), node()}]}. -repair(Partition) -> - Service = riak_kv, - MP = {riak_kv_vnode, Partition}, - FilterModFun = {?MODULE, repair_filter}, - riak_core_vnode_manager:repair(Service, MP, FilterModFun). - -%% @doc Get the status of the repair process for the given `Partition'. -<<<<<<< HEAD --spec repair_status(partition()) -> no_repair | repair_in_progress. -======= --spec repair_status(partition()) -> not_found | in_progress. ->>>>>>> master -repair_status(Partition) -> - riak_core_vnode_manager:repair_status({riak_kv_vnode, Partition}). - -%% @doc Given a `Target' partition generate a `Filter' fun to use -%% during partition repair. --spec repair_filter(partition()) -> Filter::function(). -repair_filter(Target) -> - {ok, Ring} = riak_core_ring_manager:get_my_ring(), - riak_core_repair:gen_filter(Target, - Ring, - bucket_nval_map(Ring), - default_object_nval(), - fun object_info/1). - -<<<<<<< HEAD -======= --spec hashtree_pid(index()) -> {ok, pid()}. -hashtree_pid(Partition) -> - riak_core_vnode_master:sync_command({Partition, node()}, - {hashtree_pid, node()}, - riak_kv_vnode_master, - infinity). - -%% Asynchronous version of {@link hashtree_pid/1} that sends a message back to -%% the calling process. Used by the {@link riak_kv_entropy_manager}. --spec request_hashtree_pid(index()) -> ok. -request_hashtree_pid(Partition) -> - ReqId = {hashtree_pid, Partition}, - riak_core_vnode_master:command({Partition, node()}, - {hashtree_pid, node()}, - {raw, ReqId, self()}, - riak_kv_vnode_master). 
->>>>>>> master - -%% VNode callbacks - -init([Index]) -> - Mod = app_helper:get_env(riak_kv, storage_backend), - Configuration = app_helper:get_env(riak_kv), - BucketBufSize = app_helper:get_env(riak_kv, bucket_buffer_size, 1000), - IndexBufSize = app_helper:get_env(riak_kv, index_buffer_size, 100), - KeyBufSize = app_helper:get_env(riak_kv, key_buffer_size, 100), - WorkerPoolSize = app_helper:get_env(riak_kv, worker_pool_size, 10), - {ok, VId} = get_vnodeid(Index), - DeleteMode = app_helper:get_env(riak_kv, delete_mode, 3000), - AsyncFolding = app_helper:get_env(riak_kv, async_folds, true) == true, - case catch Mod:start(Index, Configuration) of - {ok, ModState} -> - %% Get the backend capabilities - State = #state{idx=Index, - async_folding=AsyncFolding, - mod=Mod, - modstate=ModState, - vnodeid=VId, - delete_mode=DeleteMode, - bucket_buf_size=BucketBufSize, - index_buf_size=IndexBufSize, - key_buf_size=KeyBufSize, - mrjobs=dict:new()}, - case AsyncFolding of - true -> - %% Create worker pool initialization tuple - FoldWorkerPool = {pool, riak_kv_worker, WorkerPoolSize, []}, -<<<<<<< HEAD - {ok, State, [FoldWorkerPool]}; -======= - State2 = maybe_create_hashtrees(State), - {ok, State2, [FoldWorkerPool]}; ->>>>>>> master - false -> - {ok, State} - end; - {error, Reason} -> - lager:error("Failed to start ~p Reason: ~p", - [Mod, Reason]), - riak:stop("backend module failed to start."), - {error, Reason}; - {'EXIT', Reason1} -> - lager:error("Failed to start ~p Reason: ~p", - [Mod, Reason1]), - riak:stop("backend module failed to start."), - {error, Reason1} - end. - - -handle_command(?KV_PUT_REQ{bkey=BKey, - object=Object, - req_id=ReqId, - start_time=StartTime, - options=Options}, - Sender, State=#state{idx=Idx}) -> - StartTS = os:timestamp(), - riak_core_vnode:reply(Sender, {w, Idx, ReqId}), - UpdState = do_put(Sender, BKey, Object, ReqId, StartTime, Options, State), - update_vnode_stats(vnode_put, Idx, StartTS), - {noreply, UpdState}; - -handle_command(?KV_GET_REQ{bkey=BKey,req_id=ReqId},Sender,State) -> - do_get(Sender, BKey, ReqId, State); -handle_command(#riak_kv_listkeys_req_v2{bucket=Input, req_id=ReqId, caller=Caller}, _Sender, - State=#state{async_folding=AsyncFolding, - key_buf_size=BufferSize, - mod=Mod, - modstate=ModState, - idx=Idx}) -> - case Input of - {filter, Bucket, Filter} -> - ok; - Bucket -> - Filter = none - end, - BufferMod = riak_kv_fold_buffer, - case Bucket of - '_' -> - {ok, Capabilities} = Mod:capabilities(ModState), - AsyncBackend = lists:member(async_fold, Capabilities), - case AsyncFolding andalso AsyncBackend of - true -> - Opts = [async_fold]; - false -> - Opts = [] - end, - BufferFun = - fun(Results) -> - UniqueResults = lists:usort(Results), - Caller ! {ReqId, {kl, Idx, UniqueResults}} - end, - FoldFun = fold_fun(buckets, BufferMod, Filter), - ModFun = fold_buckets; - _ -> - {ok, Capabilities} = Mod:capabilities(Bucket, ModState), - AsyncBackend = lists:member(async_fold, Capabilities), - case AsyncFolding andalso AsyncBackend of - true -> - Opts = [async_fold, {bucket, Bucket}]; - false -> - Opts = [{bucket, Bucket}] - end, - BufferFun = - fun(Results) -> - Caller ! {ReqId, {kl, Idx, Results}} - end, - FoldFun = fold_fun(keys, BufferMod, Filter), - ModFun = fold_keys - end, - Buffer = BufferMod:new(BufferSize, BufferFun), - FinishFun = - fun(Buffer1) -> - riak_kv_fold_buffer:flush(Buffer1), - Caller ! 
{ReqId, Idx, done} - end, - case list(FoldFun, FinishFun, Mod, ModFun, ModState, Opts, Buffer) of - {async, AsyncWork} -> - {async, {fold, AsyncWork, FinishFun}, Caller, State}; - _ -> - {noreply, State} - end; -handle_command(?KV_DELETE_REQ{bkey=BKey, req_id=ReqId}, _Sender, State) -> - do_delete(BKey, ReqId, State); -handle_command(?KV_VCLOCK_REQ{bkeys=BKeys}, _Sender, State) -> - {reply, do_get_vclocks(BKeys, State), State}; -handle_command(?FOLD_REQ{foldfun=FoldFun, acc0=Acc0}, Sender, State) -> - %% The function in riak_core used for object folding expects the - %% bucket and key pair to be passed as the first parameter, but in - %% riak_kv the bucket and key have been separated. This function - %% wrapper is to address this mismatch. - FoldWrapper = fun(Bucket, Key, Value, Acc) -> - FoldFun({Bucket, Key}, Value, Acc) - end, - do_fold(FoldWrapper, Acc0, Sender, State); - -%% entropy exchange commands -handle_command({hashtree_pid, Node}, _, State=#state{hashtrees=HT}) -> - %% Handle riak_core request forwarding during ownership handoff. - case node() of - Node -> - %% Following is necessary in cases where anti-entropy was enabled - %% after the vnode was already running - case HT of - undefined -> - State2 = maybe_create_hashtrees(State), - {reply, {ok, State2#state.hashtrees}, State2}; - _ -> - {reply, {ok, HT}, State} - end; - _ -> - {reply, {error, wrong_node}, State} - end; - -%% Commands originating from inside this vnode -handle_command({backend_callback, Ref, Msg}, _Sender, - State=#state{mod=Mod, modstate=ModState}) -> - Mod:callback(Ref, Msg, ModState), - {noreply, State}; -handle_command({mapexec_error_noretry, JobId, Err}, _Sender, #state{mrjobs=Jobs}=State) -> - NewState = case dict:find(JobId, Jobs) of - {ok, Job} -> - Jobs1 = dict:erase(JobId, Jobs), - #mrjob{target=Target} = Job, - gen_fsm:send_event(Target, {mapexec_error_noretry, self(), Err}), - State#state{mrjobs=Jobs1}; - error -> - State - end, - {noreply, NewState}; -handle_command({mapexec_reply, JobId, Result}, _Sender, #state{mrjobs=Jobs}=State) -> - NewState = case dict:find(JobId, Jobs) of - {ok, Job} -> - Jobs1 = dict:erase(JobId, Jobs), - #mrjob{target=Target} = Job, - gen_fsm:send_event(Target, {mapexec_reply, Result, self()}), - State#state{mrjobs=Jobs1}; - error -> - State - end, - {noreply, NewState}; -handle_command(?KV_VNODE_STATUS_REQ{}, - _Sender, - State=#state{idx=Index, - mod=Mod, - modstate=ModState}) -> - BackendStatus = {backend_status, Mod, Mod:status(ModState)}, - VNodeStatus = [BackendStatus], - {reply, {vnode_status, Index, VNodeStatus}, State}. - -%% @doc Handle a coverage request. -%% More information about the specification for the ItemFilter -%% parameter can be found in the documentation for the -%% {@link riak_kv_coverage_filter} module. 
-handle_coverage(?KV_LISTBUCKETS_REQ{item_filter=ItemFilter}, - _FilterVNodes, - Sender, - State=#state{async_folding=AsyncFolding, - bucket_buf_size=BufferSize, - mod=Mod, - modstate=ModState}) -> - %% Construct the filter function - Filter = riak_kv_coverage_filter:build_filter(all, ItemFilter, undefined), - BufferMod = riak_kv_fold_buffer, - Buffer = BufferMod:new(BufferSize, result_fun(Sender)), - FoldFun = fold_fun(buckets, BufferMod, Filter), - FinishFun = finish_fun(BufferMod, Sender), - {ok, Capabilities} = Mod:capabilities(ModState), - AsyncBackend = lists:member(async_fold, Capabilities), - case AsyncFolding andalso AsyncBackend of - true -> - Opts = [async_fold]; - false -> - Opts = [] - end, - case list(FoldFun, FinishFun, Mod, fold_buckets, ModState, Opts, Buffer) of - {async, AsyncWork} -> - {async, {fold, AsyncWork, FinishFun}, Sender, State}; - _ -> - {noreply, State} - end; -handle_coverage(#riak_kv_listkeys_req_v3{bucket=Bucket, - item_filter=ItemFilter}, - FilterVNodes, Sender, State) -> - %% v3 == no backpressure - ResultFun = result_fun(Bucket, Sender), - Opts = [{bucket, Bucket}], - handle_coverage_keyfold(Bucket, ItemFilter, ResultFun, - FilterVNodes, Sender, Opts, State); -handle_coverage(?KV_LISTKEYS_REQ{bucket=Bucket, - item_filter=ItemFilter}, - FilterVNodes, Sender, State) -> - %% v4 == ack-based backpressure - ResultFun = result_fun_ack(Bucket, Sender), - Opts = [{bucket, Bucket}], - handle_coverage_keyfold(Bucket, ItemFilter, ResultFun, - FilterVNodes, Sender, Opts, State); -handle_coverage(#riak_kv_index_req_v1{bucket=Bucket, - item_filter=ItemFilter, - qry=Query}, - FilterVNodes, Sender, State) -> - %% v1 == no backpressure - handle_coverage_index(Bucket, ItemFilter, Query, - FilterVNodes, Sender, State, fun result_fun/2); -handle_coverage(?KV_INDEX_REQ{bucket=Bucket, - item_filter=ItemFilter, - qry=Query}, - FilterVNodes, Sender, State) -> - %% v2 = ack-based backpressure - handle_coverage_index(Bucket, ItemFilter, Query, - FilterVNodes, Sender, State, fun result_fun_ack/2). - -handle_coverage_index(Bucket, ItemFilter, Query, - FilterVNodes, Sender, - State=#state{mod=Mod, - modstate=ModState}, - ResultFunFun) -> - {ok, Capabilities} = Mod:capabilities(Bucket, ModState), - IndexBackend = lists:member(indexes, Capabilities), - case IndexBackend of - true -> - %% Update stats... - riak_kv_stat:update(vnode_index_read), - - ResultFun = ResultFunFun(Bucket, Sender), - Opts = [{index, Bucket, Query}, - {bucket, Bucket}], - handle_coverage_keyfold(Bucket, ItemFilter, ResultFun, - FilterVNodes, Sender, Opts, State); - false -> - {reply, {error, {indexes_not_supported, Mod}}, State} - end. 
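
handle_coverage_index/8 only reaches the common keyfold path when the backend advertises the `indexes` capability; everything else is rejected up front. The guard, read in isolation (maybe_index_opts is a hypothetical helper, not part of the patch):

    maybe_index_opts(Mod, ModState, Bucket, Query) ->
        {ok, Capabilities} = Mod:capabilities(Bucket, ModState),
        case lists:member(indexes, Capabilities) of
            true ->
                %% These options ride along to the backend fold, exactly
                %% as handle_coverage_index/8 assembles them.
                {ok, [{index, Bucket, Query}, {bucket, Bucket}]};
            false ->
                {error, {indexes_not_supported, Mod}}
        end.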
- -%% Convenience for handling both v3 and v4 coverage-based key fold operations -handle_coverage_keyfold(Bucket, ItemFilter, ResultFun, - FilterVNodes, Sender, Opts0, - State=#state{async_folding=AsyncFolding, - idx=Index, - key_buf_size=BufferSize, - mod=Mod, - modstate=ModState}) -> - %% Construct the filter function - FilterVNode = proplists:get_value(Index, FilterVNodes), - Filter = riak_kv_coverage_filter:build_filter(Bucket, ItemFilter, FilterVNode), - BufferMod = riak_kv_fold_buffer, - Buffer = BufferMod:new(BufferSize, ResultFun), - FoldFun = fold_fun(keys, BufferMod, Filter), - FinishFun = finish_fun(BufferMod, Sender), - {ok, Capabilities} = Mod:capabilities(Bucket, ModState), - AsyncBackend = lists:member(async_fold, Capabilities), - case AsyncFolding andalso AsyncBackend of - true -> - Opts = [async_fold | Opts0]; - false -> - Opts = Opts0 - end, - case list(FoldFun, FinishFun, Mod, fold_keys, ModState, Opts, Buffer) of - {async, AsyncWork} -> - {async, {fold, AsyncWork, FinishFun}, Sender, State}; - _ -> - {noreply, State} - end. - -%% While in handoff, vnodes have the option of returning {forward, State} -%% which will cause riak_core to forward the request to the handoff target -%% node. For riak_kv, we issue a put locally as well as forward it in case -%% the vnode has already handed off the previous version. All other requests -%% are handled locally and not forwarded since the relevant data may not have -%% yet been handed off to the target node. Since we do not forward deletes it -%% is possible that we do not clear a tombstone that was already handed off. -%% This is benign as the tombstone will eventually be re-deleted. -handle_handoff_command(Req=?KV_PUT_REQ{}, Sender, State) -> - {noreply, NewState} = handle_command(Req, Sender, State), - {forward, NewState}; -%% Handle all unspecified cases locally without forwarding -handle_handoff_command(Req, Sender, State) -> - handle_command(Req, Sender, State). - - -handoff_starting(_TargetNode, State) -> - {true, State#state{in_handoff=true}}. - -handoff_cancelled(State) -> - {ok, State#state{in_handoff=false}}. - -handoff_finished(_TargetNode, State) -> - {ok, State}. - -handle_handoff_data(BinObj, State) -> - PBObj = riak_core_pb:decode_riakobject_pb(zlib:unzip(BinObj)), - BKey = {PBObj#riakobject_pb.bucket,PBObj#riakobject_pb.key}, - case do_diffobj_put(BKey, binary_to_term(PBObj#riakobject_pb.val), State) of - {ok, UpdModState} -> - {reply, ok, State#state{modstate=UpdModState}}; - {error, Reason, UpdModState} -> - {reply, {error, Reason}, State#state{modstate=UpdModState}}; - Err -> - {reply, {error, Err}, State} - end. - -encode_handoff_item({B, K}, V) -> - zlib:zip(riak_core_pb:encode_riakobject_pb( - #riakobject_pb{bucket=B, key=K, val=V})). - -is_empty(State=#state{mod=Mod, modstate=ModState}) -> - {Mod:is_empty(ModState), State}. - -delete(State=#state{idx=Index,mod=Mod, modstate=ModState}) -> - %% clear vnodeid first, if drop removes data but fails - %% want to err on the side of creating a new vnodeid - {ok, cleared} = clear_vnodeid(Index), - case Mod:drop(ModState) of - {ok, UpdModState} -> - ok; - {error, Reason, UpdModState} -> - lager:error("Failed to drop ~p. Reason: ~p~n", [Mod, Reason]), - ok - end, - case State#state.hashtrees of - undefined -> - ok; - HT -> - riak_kv_index_hashtree:destroy(HT) - end, - {ok, State#state{modstate=UpdModState,vnodeid=undefined,hashtrees=undefined}}. - -terminate(_Reason, #state{mod=Mod, modstate=ModState}) -> - Mod:stop(ModState), - ok. 
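
Handoff items are protocol-buffer wrapped and compressed: encode_handoff_item/2 zips a riakobject_pb record whose val field carries the already-serialized object, and handle_handoff_data/2 reverses both steps before merging locally. A round-trip sketch (assumes the riakobject_pb record definition is in scope, as it is in this module):

    handoff_roundtrip(B, K, Obj) ->
        Bin = encode_handoff_item({B, K}, term_to_binary(Obj)),
        PB = riak_core_pb:decode_riakobject_pb(zlib:unzip(Bin)),
        %% The decoded fields match what went in above.
        {B, K} = {PB#riakobject_pb.bucket, PB#riakobject_pb.key},
        binary_to_term(PB#riakobject_pb.val).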
- -handle_info(retry_create_hashtree, State=#state{hashtrees=undefined}) -> - State2 = maybe_create_hashtrees(State), - case State2#state.hashtrees of - undefined -> - ok; - _ -> - lager:info("riak_kv/~p: successfully started index_hashtree on retry", - [State#state.idx]) - end, - {ok, State2}; -handle_info(retry_create_hashtree, State) -> - {ok, State}; -handle_info({'DOWN', _, _, Pid, _}, State=#state{hashtrees=Pid}) -> - State2 = State#state{hashtrees=undefined}, - State3 = maybe_create_hashtrees(State2), - {ok, State3}; -handle_info({'DOWN', _, _, _, _}, State) -> - {ok, State}; -handle_info({final_delete, BKey, RObjHash}, State = #state{mod=Mod, modstate=ModState}) -> - UpdState = case do_get_term(BKey, Mod, ModState) of - {ok, RObj} -> - case delete_hash(RObj) of - RObjHash -> - do_backend_delete(BKey, RObj, State); - _ -> - State - end; - _ -> - State - end, - {ok, UpdState}. - -handle_exit(_Pid, Reason, State) -> - %% A linked processes has died so the vnode - %% process should take appropriate action here. - %% The default behavior is to crash the vnode - %% process so that it can be respawned - %% by riak_core_vnode_master to prevent - %% messages from stacking up on the process message - %% queue and never being processed. - lager:error("Linked process exited. Reason: ~p", [Reason]), - {stop, linked_process_crash, State}. - -%% @private -%% upon receipt of a client-initiated put -do_put(Sender, {Bucket,_Key}=BKey, RObj, ReqID, StartTime, Options, State) -> - case proplists:get_value(bucket_props, Options) of - undefined -> - {ok,Ring} = riak_core_ring_manager:get_my_ring(), - BProps = riak_core_bucket:get_bucket(Bucket, Ring); - BProps -> - BProps - end, - case proplists:get_value(rr, Options, false) of - true -> - PruneTime = undefined; - false -> - PruneTime = StartTime - end, - Coord = proplists:get_value(coord, Options, false), - PutArgs = #putargs{returnbody=Coord orelse proplists:get_value(returnbody,Options,false), - coord=Coord, - lww=proplists:get_value(last_write_wins, BProps, false), - bkey=BKey, - robj=RObj, - reqid=ReqID, - bprops=BProps, - starttime=StartTime, - prunetime=PruneTime}, - {PrepPutRes, UpdPutArgs} = prepare_put(State, PutArgs), - {Reply, UpdState} = perform_put(PrepPutRes, State, UpdPutArgs), - riak_core_vnode:reply(Sender, Reply), - - update_index_write_stats(UpdPutArgs#putargs.is_index, UpdPutArgs#putargs.index_specs), - UpdState. - -do_backend_delete(BKey, RObj, State = #state{mod = Mod, modstate = ModState}) -> - %% object is a tombstone or all siblings are tombstones -<<<<<<< HEAD - -======= ->>>>>>> master - %% Calculate the index specs to remove... - %% JDM: This should just be a tombstone by this point, but better - %% safe than sorry. - IndexSpecs = riak_object:diff_index_specs(undefined, RObj), - - %% Do the delete... - {Bucket, Key} = BKey, - case Mod:delete(Bucket, Key, IndexSpecs, ModState) of - {ok, UpdModState} -> - riak_kv_index_hashtree:delete(BKey, State#state.hashtrees), - update_index_delete_stats(IndexSpecs), - State#state{modstate = UpdModState}; - {error, _Reason, UpdModState} -> - State#state{modstate = UpdModState} - end. - -%% Compute a hash of the deleted object -delete_hash(RObj) -> - erlang:phash2(RObj, 4294967296). - -prepare_put(State=#state{vnodeid=VId, - mod=Mod, - modstate=ModState}, - PutArgs=#putargs{bkey={Bucket, _Key}, - lww=LWW, - robj=RObj, - starttime=StartTime}) -> - %% Can we avoid reading the existing object? 
If this is not an - %% index backend, and the bucket is set to last-write-wins, then - %% no need to incur additional get. Otherwise, we need to read the - %% old object to know how the indexes have changed. - {ok, Capabilities} = Mod:capabilities(Bucket, ModState), - IndexBackend = lists:member(indexes, Capabilities), - case LWW andalso not IndexBackend of - true -> - ObjToStore = riak_object:increment_vclock(RObj, VId, StartTime), - {{true, ObjToStore}, PutArgs#putargs{is_index = false}}; - false -> - prepare_put(State, PutArgs, IndexBackend) - end. -prepare_put(#state{vnodeid=VId, - mod=Mod, - modstate=ModState}, - PutArgs=#putargs{bkey={Bucket, Key}, - robj=RObj, - bprops=BProps, - coord=Coord, - lww=LWW, - starttime=StartTime, - prunetime=_PruneTime}, - IndexBackend) -> - case Mod:get(Bucket, Key, ModState) of - {error, not_found, _UpdModState} -> - case IndexBackend of - true -> - IndexSpecs = riak_object:index_specs(RObj); - false -> - IndexSpecs = [] - end, - ObjToStore = case Coord of - true -> - riak_object:increment_vclock(RObj, VId, StartTime); - false -> - RObj - end, - {{true, ObjToStore}, PutArgs#putargs{index_specs=IndexSpecs, is_index=IndexBackend}}; - {ok, Val, _UpdModState} -> - OldObj = binary_to_term(Val), - case put_merge(Coord, LWW, OldObj, RObj, VId, StartTime) of - {oldobj, OldObj1} -> - {{false, OldObj1}, PutArgs}; - {newobj, NewObj} -> -% VC = riak_object:vclock(NewObj), - AMObj = enforce_allow_mult(NewObj, BProps), - case IndexBackend of - true -> - IndexSpecs = - riak_object:diff_index_specs(AMObj, - OldObj); - false -> - IndexSpecs = [] - end, -<<<<<<< HEAD -% case PruneTime of -% undefined -> -% ObjToStore = AMObj; -% _ -> -% ObjToStore = -% riak_object:set_vclock(AMObj, -% vclock:prune(VC, -% PruneTime, -% BProps)) -% end, - {{true, AMObj}, -======= - case PruneTime of - undefined -> - ObjToStore = AMObj; - _ -> - ObjToStore = - riak_object:set_vclock(AMObj, - vclock:prune(VC, - PruneTime, - BProps)) - end, - {{true, ObjToStore}, ->>>>>>> master - PutArgs#putargs{index_specs=IndexSpecs, is_index=IndexBackend}} - end - end. - -perform_put({false, Obj}, - #state{idx=Idx}=State, - #putargs{returnbody=true, - reqid=ReqID}) -> - {{dw, Idx, Obj, ReqID}, State}; -perform_put({false, _Obj}, - #state{idx=Idx}=State, - #putargs{returnbody=false, - reqid=ReqId}) -> - {{dw, Idx, ReqId}, State}; -perform_put({true, Obj}, - #state{idx=Idx, - mod=Mod, - modstate=ModState}=State, - #putargs{returnbody=RB, - bkey={Bucket, Key}, - reqid=ReqID, - index_specs=IndexSpecs}) -> - Val = term_to_binary(Obj), - case Mod:put(Bucket, Key, IndexSpecs, Val, ModState) of - {ok, UpdModState} -> - update_hashtree(Bucket, Key, Val, State), - case RB of - true -> - Reply = {dw, Idx, Obj, ReqID}; - false -> - Reply = {dw, Idx, ReqID} - end; - {error, _Reason, UpdModState} -> - Reply = {fail, Idx, ReqID} - end, - {Reply, State#state{modstate=UpdModState}}. - -%% @private -%% enforce allow_mult bucket property so that no backend ever stores -%% an object with multiple contents if allow_mult=false for that bucket -enforce_allow_mult(Obj, BProps) -> - case proplists:get_value(allow_mult, BProps) of - true -> Obj; - _ -> - case riak_object:get_contents(Obj) of - [_] -> Obj; - Mult -> - Clocks = [C || {_,_,C} <- Mult], - Clock = dottedvv:merge(Clocks), - {MD, V, _VC} = select_newest_content(Mult), - riak_object:set_contents(Obj, [{MD, V, Clock}]) - end - end. 
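
With dotted version vectors each content tuple now carries its own clock ({Metadata, Value, Clock} rather than {Metadata, Value}), so collapsing siblings under allow_mult=false must merge the per-content clocks instead of discarding them; otherwise the surviving value could later be dominated by history it had already seen. The core of enforce_allow_mult/2, isolated as a sketch:

    collapse_siblings(Contents = [_, _ | _]) ->
        %% Keep the newest metadata/value pair, but the stored clock must
        %% cover the causal history of *all* siblings, hence dottedvv:merge/1.
        Clocks = [C || {_, _, C} <- Contents],
        Clock = dottedvv:merge(Clocks),
        {MD, V, _OldClock} = select_newest_content(Contents),
        [{MD, V, Clock}].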
- -%% @private -%% choose the latest content to store for the allow_mult=false case -select_newest_content(Mult) -> - hd(lists:sort( - fun({MD0, _, _}, {MD1, _, _}) -> - riak_core_util:compare_dates( - dict:fetch(<<"X-Riak-Last-Modified">>, MD0), - dict:fetch(<<"X-Riak-Last-Modified">>, MD1)) - end, - Mult)). - -%% @private -put_merge(false, true, _CurObj, UpdObj, _VId, _StartTime) -> % coord=false, LWW=true - {newobj, UpdObj}; -put_merge(false, false, CurObj, UpdObj, _VId, _StartTime) -> % coord=false, LWW=false - ResObj = riak_object:syntactic_merge(CurObj, UpdObj), -% case ResObj =:= CurObj of - case dottedvv:equal(riak_object:vclock(ResObj), riak_object:vclock(CurObj)) of - true -> - {oldobj, CurObj}; - false -> - {newobj, ResObj} - end; -put_merge(true, true, _CurObj, UpdObj, VId, StartTime) -> % coord=false, LWW=true - {newobj, riak_object:increment_vclock(UpdObj, VId, StartTime)}; -put_merge(true, false, CurObj, UpdObj, VId, _StartTime) -> - UpdObj1 = riak_object:update_vclock(UpdObj, CurObj, VId), - ResObj = riak_object:syntactic_merge(CurObj, UpdObj1), - {newobj, ResObj}. -% UpdVC = riak_object:vclock(UpdObj1), -% CurVC = riak_object:vclock(CurObj), -% -% %% Check the coord put will replace the existing object -% case vclock:get_counter(VId, UpdVC) > vclock:get_counter(VId, CurVC) andalso -% vclock:descends(CurVC, UpdVC) == false andalso -% vclock:descends(UpdVC, CurVC) == true of -% true -> -% {newobj, UpdObj1}; -% false -> -% %% If not, make sure it does -% {newobj, riak_object:increment_vclock( -% riak_object:merge(CurObj, UpdObj1), VId, StartTime)} -% end. - -%% @private -do_get(_Sender, BKey, ReqID, - State=#state{idx=Idx,mod=Mod,modstate=ModState}) -> - StartTS = os:timestamp(), - Retval = do_get_term(BKey, Mod, ModState), - update_vnode_stats(vnode_get, Idx, StartTS), - {reply, {r, Retval, Idx, ReqID}, State}. - -%% @private -do_get_term(BKey, Mod, ModState) -> - case do_get_binary(BKey, Mod, ModState) of - {ok, Bin, _UpdModState} -> - {ok, binary_to_term(Bin)}; - %% @TODO Eventually it would be good to - %% make the use of not_found or notfound - %% consistent throughout the code. - {error, not_found, _UpdatedModstate} -> - {error, notfound}; - {error, Reason, _UpdatedModstate} -> - {error, Reason}; - Err -> - Err - end. - -do_get_binary({Bucket, Key}, Mod, ModState) -> - Mod:get(Bucket, Key, ModState). - -%% @private -%% @doc This is a generic function for operations that involve -%% listing things from the backend. Examples are listing buckets, -%% listing keys, or doing secondary index queries. -list(FoldFun, FinishFun, Mod, ModFun, ModState, Opts, Buffer) -> - case Mod:ModFun(FoldFun, Buffer, Opts, ModState) of - {ok, Acc} -> - FinishFun(Acc); - {async, AsyncWork} -> - {async, AsyncWork} - end. - -%% @private -fold_fun(buckets, BufferMod, none) -> - fun(Bucket, Buffer) -> - BufferMod:add(Bucket, Buffer) - end; -fold_fun(buckets, BufferMod, Filter) -> - fun(Bucket, Buffer) -> - case Filter(Bucket) of - true -> - BufferMod:add(Bucket, Buffer); - false -> - Buffer - end - end; -fold_fun(keys, BufferMod, none) -> - fun(_, Key, Buffer) -> - BufferMod:add(Key, Buffer) - end; -fold_fun(keys, BufferMod, Filter) -> - fun(_, Key, Buffer) -> - case Filter(Key) of - true -> - BufferMod:add(Key, Buffer); - false -> - Buffer - end - end. - -%% @private -result_fun(Sender) -> - fun(Items) -> - riak_core_vnode:reply(Sender, Items) - end. - -%% @private -result_fun(Bucket, Sender) -> - fun(Items) -> - riak_core_vnode:reply(Sender, {Bucket, Items}) - end. 
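
put_merge/6 now resolves on two flags: whether this vnode coordinates the put and whether the bucket is last-write-wins, with dottedvv:equal/2 replacing the old term-equality test on the merge result. The four cases, summarized as a descriptive sketch (not code from the patch):

    put_merge_policy(Coord, LWW) ->
        case {Coord, LWW} of
            {false, true}  -> keep_incoming;           % trust the new object as-is
            {false, false} -> merge_keep_old_if_equal; % dottedvv:equal/2 on clocks
            {true,  true}  -> increment_then_keep;     % new event at this vnode id
            {true,  false} -> update_clock_then_merge  % riak_object:update_vclock/3
        end.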
- -%% wait for acknowledgement that results were received before -%% continuing, as a way of providing backpressure for processes that -%% can't handle results as fast as we can send them -result_fun_ack(Bucket, Sender) -> - fun(Items) -> - Monitor = riak_core_vnode:monitor(Sender), - riak_core_vnode:reply(Sender, {{self(), Monitor}, Bucket, Items}), - receive - {Monitor, ok} -> - erlang:demonitor(Monitor, [flush]); - {'DOWN', Monitor, process, _Pid, _Reason} -> - throw(receiver_down) - end - end. - -%% @doc If a listkeys request sends a result of `{From, Bucket, -%% Items}', that means it wants acknowledgement of those items before -%% it will send more. Call this function with that `From' to trigger -%% the next batch. --spec ack_keys(From::{pid(), reference()}) -> term(). -ack_keys({Pid, Ref}) -> - Pid ! {Ref, ok}. - -%% @private -finish_fun(BufferMod, Sender) -> - fun(Buffer) -> - finish_fold(BufferMod, Buffer, Sender) - end. - -%% @private -finish_fold(BufferMod, Buffer, Sender) -> - BufferMod:flush(Buffer), - riak_core_vnode:reply(Sender, done). - -%% @private -do_delete(BKey, ReqId, State) -> - Mod = State#state.mod, - ModState = State#state.modstate, - Idx = State#state.idx, - DeleteMode = State#state.delete_mode, - - %% Get the existing object. - case do_get_term(BKey, Mod, ModState) of - {ok, RObj} -> - %% Object exists, check if it should be deleted. - case riak_kv_util:obj_not_deleted(RObj) of - undefined -> - case DeleteMode of - keep -> - %% keep tombstones indefinitely - {reply, {fail, Idx, ReqId}, State}; - immediate -> - UpdState = do_backend_delete(BKey, RObj, State), - {reply, {del, Idx, ReqId}, UpdState}; - Delay when is_integer(Delay) -> - erlang:send_after(Delay, self(), - {final_delete, BKey, - delete_hash(RObj)}), - %% Nothing checks these messages - will just reply - %% del for now until we can refactor. - {reply, {del, Idx, ReqId}, State} - end; - _ -> - %% not a tombstone or not all siblings are tombstones - {reply, {fail, Idx, ReqId}, State} - end; - _ -> - %% does not exist in the backend - {reply, {fail, Idx, ReqId}, State} - end. - -%% @private -do_fold(Fun, Acc0, Sender, State=#state{async_folding=AsyncFolding, - mod=Mod, - modstate=ModState}) -> - {ok, Capabilities} = Mod:capabilities(ModState), - AsyncBackend = lists:member(async_fold, Capabilities), - case AsyncFolding andalso AsyncBackend of - true -> - Opts = [async_fold]; - false -> - Opts = [] - end, - case Mod:fold_objects(Fun, Acc0, Opts, ModState) of - {ok, Acc} -> - {reply, Acc, State}; - {async, Work} -> - FinishFun = - fun(Acc) -> - riak_core_vnode:reply(Sender, Acc) - end, - {async, {fold, Work, FinishFun}, Sender, State}; - ER -> - {reply, ER, State} - end. - -%% @private -do_get_vclocks(KeyList,_State=#state{mod=Mod,modstate=ModState}) -> - [{BKey, do_get_vclock(BKey,Mod,ModState)} || BKey <- KeyList]. -%% @private -do_get_vclock({Bucket, Key}, Mod, ModState) -> - case Mod:get(Bucket, Key, ModState) of - {error, not_found, _UpdModState} -> vclock:fresh(); - {ok, Val, _UpdModState} -> riak_object:vclock(binary_to_term(Val)) - end. 
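
result_fun_ack/2 is the v4 listkeys backpressure: each batch arrives as {{Pid, Monitor}, Bucket, Items} and the fold blocks until the receiver calls ack_keys/1 (or dies, which the monitor catches). A consumer sketch for a raw sender, whose replies arrive wrapped as {ReqId, Msg}; process_keys/1 is a hypothetical per-batch handler:

    keys_consumer(ReqId) ->
        receive
            {ReqId, {From, _Bucket, Keys}} ->
                process_keys(Keys),   %% hypothetical handler
                ack_keys(From),       %% unblocks the vnode-side fold
                keys_consumer(ReqId);
            {ReqId, done} ->
                ok
        end.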
-
-%% @private
-%% upon receipt of a handoff datum, there is no client FSM
-do_diffobj_put({Bucket, Key}, DiffObj,
-<<<<<<< HEAD
-               _StateData=#state{mod=Mod,
-                                 modstate=ModState,
-                                 idx=Idx}) ->
-=======
-               StateData=#state{mod=Mod,
-                                modstate=ModState,
-                                idx=Idx}) ->
->>>>>>> master
-    StartTS = os:timestamp(),
-    {ok, Capabilities} = Mod:capabilities(Bucket, ModState),
-    IndexBackend = lists:member(indexes, Capabilities),
-    case Mod:get(Bucket, Key, ModState) of
-        {error, not_found, _UpdModState} ->
-            case IndexBackend of
-                true ->
-                    IndexSpecs = riak_object:index_specs(DiffObj);
-                false ->
-                    IndexSpecs = []
-            end,
-            Val = term_to_binary(DiffObj),
-            Res = Mod:put(Bucket, Key, IndexSpecs, Val, ModState),
-            case Res of
-                {ok, _UpdModState} ->
-<<<<<<< HEAD
-=======
-                    update_hashtree(Bucket, Key, Val, StateData),
->>>>>>> master
-                    update_index_write_stats(IndexBackend, IndexSpecs),
-                    update_vnode_stats(vnode_put, Idx, StartTS);
-                _ -> nop
-            end,
-            Res;
-        {ok, Val0, _UpdModState} ->
-            OldObj = binary_to_term(Val0),
-            %% Merge handoff values with the current - possibly discarding
-            %% if out of date. Ok to set VId/Starttime undefined as
-            %% they are not used for non-coordinating puts.
-            case put_merge(false, false, OldObj, DiffObj, undefined, undefined) of
-                {oldobj, _} ->
-                    {ok, ModState};
-                {newobj, NewObj} ->
-                    AMObj = enforce_allow_mult(NewObj, riak_core_bucket:get_bucket(Bucket)),
-                    case IndexBackend of
-                        true ->
-                            IndexSpecs = riak_object:diff_index_specs(AMObj, OldObj);
-                        false ->
-                            IndexSpecs = []
-                    end,
-                    Val = term_to_binary(AMObj),
-                    Res = Mod:put(Bucket, Key, IndexSpecs, Val, ModState),
-                    case Res of
-                        {ok, _UpdModState} ->
-<<<<<<< HEAD
-=======
-                            update_hashtree(Bucket, Key, Val, StateData),
->>>>>>> master
-                            update_index_write_stats(IndexBackend, IndexSpecs),
-                            update_vnode_stats(vnode_put, Idx, StartTS);
-                        _ ->
-                            nop
-                    end,
-                    Res
-            end
-    end.
-
--spec update_hashtree(binary(), binary(), binary(), state()) -> ok.
-update_hashtree(Bucket, Key, Val, #state{hashtrees=Trees}) ->
-    riak_kv_index_hashtree:insert_object({Bucket, Key}, Val, Trees).
-
-%% @private
-
-%% Get the vnodeid, assigning and storing if necessary
-get_vnodeid(Index) ->
-    F = fun(Status) ->
-                case proplists:get_value(vnodeid, Status, undefined) of
-                    undefined ->
-                        assign_vnodeid(os:timestamp(),
-                                       riak_core_nodeid:get(),
-                                       Status);
-                    VnodeId ->
-                        {VnodeId, Status}
-                end
-        end,
-    update_vnode_status(F, Index). % Returns {ok, VnodeId} | {error, Reason}
-
-%% Assign a unique vnodeid, making sure the timestamp is unique by incrementing
-%% into the future if necessary.
-assign_vnodeid(Now, NodeId, Status) ->
-    {Mega, Sec, _Micro} = Now,
-    NowEpoch = 1000000*Mega + Sec,
-    LastVnodeEpoch = proplists:get_value(last_epoch, Status, 0),
-    VnodeEpoch = erlang:max(NowEpoch, LastVnodeEpoch+1),
-    VnodeId = <<NodeId/binary, VnodeEpoch:32/integer>>,
-    UpdStatus = [{vnodeid, VnodeId}, {last_epoch, VnodeEpoch} |
-                 proplists:delete(vnodeid,
-                                  proplists:delete(last_epoch, Status))],
-    {VnodeId, UpdStatus}.
-
-%% Clear the vnodeid - returns {ok, cleared}
-clear_vnodeid(Index) ->
-    F = fun(Status) ->
-                {cleared, proplists:delete(vnodeid, Status)}
-        end,
-    update_vnode_status(F, Index). % Returns {ok, VnodeId} | {error, Reason}
-
-update_vnode_status(F, Index) ->
-    VnodeFile = vnode_status_filename(Index),
-    ok = filelib:ensure_dir(VnodeFile),
-    case read_vnode_status(VnodeFile) of
-        {ok, Status} ->
-            update_vnode_status2(F, Status, VnodeFile);
-        {error, enoent} ->
-            update_vnode_status2(F, [], VnodeFile);
-        ER ->
-            ER
-    end.
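
The vnodeid is the node id with the epoch seconds appended as a 32-bit big-endian integer, which is exactly what the restart tests below assert. Checking the first test vector in the shell:

    1> Epoch = 1000000*1314 + 224520.
    1314224520
    2> <<Epoch:32/integer>>.
    <<78,85,121,136>>
    3> <<1,2,3,4, Epoch:32/integer>>.
    <<1,2,3,4,78,85,121,136>>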
- -update_vnode_status2(F, Status, VnodeFile) -> - case F(Status) of - {Ret, Status} -> % No change - {ok, Ret}; - {Ret, UpdStatus} -> - case write_vnode_status(UpdStatus, VnodeFile) of - ok -> - {ok, Ret}; - ER -> - ER - end - end. - -vnode_status_filename(Index) -> - P_DataDir = app_helper:get_env(riak_core, platform_data_dir), - VnodeStatusDir = app_helper:get_env(riak_kv, vnode_status, - filename:join(P_DataDir, "kv_vnode")), - filename:join(VnodeStatusDir, integer_to_list(Index)). - -read_vnode_status(File) -> - case file:consult(File) of - {ok, [Status]} when is_list(Status) -> - {ok, proplists:delete(version, Status)}; - ER -> - ER - end. - -write_vnode_status(Status, File) -> - VersionedStatus = [{version, 1} | proplists:delete(version, Status)], - TmpFile = File ++ "~", - case file:write_file(TmpFile, io_lib:format("~p.", [VersionedStatus])) of - ok -> - file:rename(TmpFile, File); - ER -> - ER - end. - -%% @private -wait_for_vnode_status_results([], _ReqId, Acc) -> - Acc; -wait_for_vnode_status_results(PrefLists, ReqId, Acc) -> - receive - {ReqId, {vnode_status, Index, Status}} -> - UpdPrefLists = proplists:delete(Index, PrefLists), - wait_for_vnode_status_results(UpdPrefLists, - ReqId, - [{Index, Status} | Acc]); - _ -> - wait_for_vnode_status_results(PrefLists, ReqId, Acc) - end. - -%% @private --spec update_vnode_stats(vnode_get | vnode_put, partition(), erlang:timestamp()) -> - ok. -update_vnode_stats(Op, Idx, StartTS) -> - riak_kv_stat:update({Op, Idx, timer:now_diff( os:timestamp(), StartTS)}). - -%% @private -update_index_write_stats(false, _IndexSpecs) -> - ok; -update_index_write_stats(true, IndexSpecs) -> - {Added, Removed} = count_index_specs(IndexSpecs), - riak_kv_stat:update({vnode_index_write, Added, Removed}). - -%% @private -update_index_delete_stats(IndexSpecs) -> - {_Added, Removed} = count_index_specs(IndexSpecs), - riak_kv_stat:update({vnode_index_delete, Removed}). - -%% @private -%% @doc Given a list of index specs, return the number to add and -%% remove. -count_index_specs(IndexSpecs) -> - %% Count index specs... - F = fun({add, _, _}, {AddAcc, RemoveAcc}) -> - {AddAcc + 1, RemoveAcc}; - ({remove, _, _}, {AddAcc, RemoveAcc}) -> - {AddAcc, RemoveAcc + 1} - end, - lists:foldl(F, {0, 0}, IndexSpecs). - -%% @private -bucket_nval_map(Ring) -> - [{riak_core_bucket:name(B), riak_core_bucket:n_val(B)} || - B <- riak_core_bucket:get_buckets(Ring)]. - -%% @private -default_object_nval() -> - riak_core_bucket:n_val(riak_core_config:default_bucket_props()). - -%% @private -object_info({Bucket, _Key}=BKey) -> - Hash = riak_core_util:chash_key(BKey), - {Bucket, Hash}. - - --ifdef(TEST). - -%% Check assigning a vnodeid twice in the same second -assign_vnodeid_restart_same_ts_test() -> - Now1 = {1314,224520,343446}, %% TS=1314224520 - Now2 = {1314,224520,345865}, %% as unsigned net-order int <<78,85,121,136>> - NodeId = <<1, 2, 3, 4>>, - {Vid1, Status1} = assign_vnodeid(Now1, NodeId, []), - ?assertEqual(<<1, 2, 3, 4, 78, 85, 121, 136>>, Vid1), - %% Simulate clear - Status2 = proplists:delete(vnodeid, Status1), - %% Reassign - {Vid2, _Status3} = assign_vnodeid(Now2, NodeId, Status2), - ?assertEqual(<<1, 2, 3, 4, 78, 85, 121, 137>>, Vid2). 
- -%% Check assigning a vnodeid with a later date -assign_vnodeid_restart_later_ts_test() -> - Now1 = {1000,000000,0}, %% <<59,154,202,0>> - Now2 = {2000,000000,0}, %% <<119,53,148,0>> - NodeId = <<1, 2, 3, 4>>, - {Vid1, Status1} = assign_vnodeid(Now1, NodeId, []), - ?assertEqual(<<1, 2, 3, 4, 59,154,202,0>>, Vid1), - %% Simulate clear - Status2 = proplists:delete(vnodeid, Status1), - %% Reassign - {Vid2, _Status3} = assign_vnodeid(Now2, NodeId, Status2), - ?assertEqual(<<1, 2, 3, 4, 119,53,148,0>>, Vid2). - -%% Check assigning a vnodeid with a later date - just in case of clock skew -assign_vnodeid_restart_earlier_ts_test() -> - Now1 = {2000,000000,0}, %% <<119,53,148,0>> - Now2 = {1000,000000,0}, %% <<59,154,202,0>> - NodeId = <<1, 2, 3, 4>>, - {Vid1, Status1} = assign_vnodeid(Now1, NodeId, []), - ?assertEqual(<<1, 2, 3, 4, 119,53,148,0>>, Vid1), - %% Simulate clear - Status2 = proplists:delete(vnodeid, Status1), - %% Reassign - %% Should be greater than last offered - which is the 2mil timestamp - {Vid2, _Status3} = assign_vnodeid(Now2, NodeId, Status2), - ?assertEqual(<<1, 2, 3, 4, 119,53,148,1>>, Vid2). - -%% Test -vnode_status_test_() -> - {setup, - fun() -> - filelib:ensure_dir("kv_vnode_status_test/.test"), - ?cmd("chmod u+rwx kv_vnode_status_test"), - ?cmd("rm -rf kv_vnode_status_test"), - application:set_env(riak_kv, vnode_status, "kv_vnode_status_test"), - ok - end, - fun(_) -> - application:unset_env(riak_kv, vnode_status), - ?cmd("chmod u+rwx kv_vnode_status_test"), - ?cmd("rm -rf kv_vnode_status_test"), - ok - end, - [?_test(begin % initial create failure - ?cmd("rm -rf kv_vnode_status_test || true"), - ?cmd("mkdir kv_vnode_status_test"), - ?cmd("chmod -w kv_vnode_status_test"), - F = fun([]) -> - {shouldfail, [badperm]} - end, - Index = 0, - ?assertEqual({error, eacces}, update_vnode_status(F, Index)) - end), - ?_test(begin % create successfully - ?cmd("chmod +w kv_vnode_status_test"), - - F = fun([]) -> - {created, [created]} - end, - Index = 0, - ?assertEqual({ok, created}, update_vnode_status(F, Index)) - end), - ?_test(begin % update successfully - F = fun([created]) -> - {updated, [updated]} - end, - Index = 0, - ?assertEqual({ok, updated}, update_vnode_status(F, Index)) - end), - ?_test(begin % update failure - ?cmd("chmod 000 kv_vnode_status_test/0"), - ?cmd("chmod 500 kv_vnode_status_test"), - F = fun([updated]) -> - {shouldfail, [updatedagain]} - end, - Index = 0, - ?assertEqual({error, eacces}, update_vnode_status(F, Index)) - end) - - ]}. - -dummy_backend(BackendMod) -> - Ring = riak_core_ring:fresh(16,node()), - riak_core_ring_manager:set_ring_global(Ring), - application:set_env(riak_kv, async_folds, false), - application:set_env(riak_kv, storage_backend, BackendMod), - application:set_env(riak_core, default_bucket_props, []), - application:set_env(bitcask, data_root, bitcask_test_dir()), - application:set_env(eleveldb, data_root, eleveldb_test_dir()), - application:set_env(riak_kv, multi_backend_default, multi_dummy_memory1), - application:set_env(riak_kv, multi_backend, - [{multi_dummy_memory1, riak_kv_memory_backend, []}, - {multi_dummy_memory2, riak_kv_memory_backend, []}]). - -bitcask_test_dir() -> - "./test.bitcask-temp-data". - -eleveldb_test_dir() -> - "./test.eleveldb-temp-data". 
- - -backend_with_known_key(BackendMod) -> - dummy_backend(BackendMod), - {ok, S1} = init([0]), - B = <<"f">>, - K = <<"b">>, - O = riak_object:new(B, K, <<"z">>), - {noreply, S2} = handle_command(?KV_PUT_REQ{bkey={B,K}, - object=O, - req_id=123, - start_time=riak_core_util:moment(), - options=[]}, - {raw, 456, self()}, - S1), - {S2, B, K}. - -list_buckets_test_() -> - {foreach, - fun() -> - application:start(sasl), - Env = application:get_all_env(riak_kv), - application:start(folsom), - riak_core_stat_cache:start_link(), - riak_kv_stat:register_stats(), - Env - end, - fun(Env) -> - riak_core_stat_cache:stop(), - application:stop(folsom), - application:stop(sasl), - [application:unset_env(riak_kv, K) || - {K, _V} <- application:get_all_env(riak_kv)], - [application:set_env(riak_kv, K, V) || {K, V} <- Env] - end, - [ - fun(_) -> - {"bitcask list buckets", - fun() -> - list_buckets_test_i(riak_kv_bitcask_backend) - end - } - end, - fun(_) -> - {"eleveldb list buckets", - fun() -> - list_buckets_test_i(riak_kv_eleveldb_backend) - end - } - end, - fun(_) -> - {"memory list buckets", - fun() -> - list_buckets_test_i(riak_kv_memory_backend), - ok - end - } - end, - fun(_) -> - {"multi list buckets", - fun() -> - list_buckets_test_i(riak_kv_multi_backend), - ok - end - } - end - ] - }. - -list_buckets_test_i(BackendMod) -> - {S, B, _K} = backend_with_known_key(BackendMod), - Caller = new_result_listener(buckets), - handle_coverage(?KV_LISTBUCKETS_REQ{item_filter=none}, [], - {fsm, {456, {0, node()}}, Caller}, S), - ?assertEqual({ok, [B]}, results_from_listener(Caller)), - flush_msgs(). - -filter_keys_test() -> - {S, B, K} = backend_with_known_key(riak_kv_memory_backend), - Caller1 = new_result_listener(keys), - handle_coverage(?KV_LISTKEYS_REQ{bucket=B, - item_filter=fun(_) -> true end}, [], - {fsm, {124, {0, node()}}, Caller1}, S), - ?assertEqual({ok, [K]}, results_from_listener(Caller1)), - - Caller2 = new_result_listener(keys), - handle_coverage(?KV_LISTKEYS_REQ{bucket=B, - item_filter=fun(_) -> false end}, [], - {fsm, {125, {0, node()}}, Caller2}, S), - ?assertEqual({ok, []}, results_from_listener(Caller2)), - - Caller3 = new_result_listener(keys), - handle_coverage(?KV_LISTKEYS_REQ{bucket= <<"g">>, - item_filter=fun(_) -> true end}, [], - {fsm, {126, {0, node()}}, Caller3}, S), - ?assertEqual({ok, []}, results_from_listener(Caller3)), - - flush_msgs(). - -new_result_listener(Type) -> - case Type of - buckets -> - ResultFun = fun() -> result_listener_buckets([]) end; - keys -> - ResultFun = fun() -> result_listener_keys([]) end - end, - spawn(ResultFun). - -result_listener_buckets(Acc) -> - receive - {'$gen_event', {_, done}} -> - result_listener_done(Acc); - {'$gen_event', {_, Results}} -> - result_listener_buckets(Results ++ Acc) - - after 5000 -> - result_listener_done({timeout, Acc}) - end. - -result_listener_keys(Acc) -> - receive - {'$gen_event', {_, done}} -> - result_listener_done(Acc); - {'$gen_event', {_, {_Bucket, Results}}} -> - result_listener_keys(Results ++ Acc); - {'$gen_event', {_, {From, _Bucket, Results}}} -> - riak_kv_vnode:ack_keys(From), - result_listener_keys(Results ++ Acc) - after 5000 -> - result_listener_done({timeout, Acc}) - end. - -result_listener_done(Result) -> - receive - {get_results, Pid} -> - Pid ! {listener_results, Result} - end. - -results_from_listener(Listener) -> - Listener ! {get_results, self()}, - receive - {listener_results, Result} -> - {ok, Result} - after 5000 -> - {error, listener_timeout} - end. 
- -flush_msgs() -> - receive - _Msg -> - flush_msgs() - after - 0 -> - ok - end. - --endif. diff --git a/test/fsm_eqc_util.erl.orig b/test/fsm_eqc_util.erl.orig deleted file mode 100644 index 6582754449..0000000000 --- a/test/fsm_eqc_util.erl.orig +++ /dev/null @@ -1,320 +0,0 @@ --module(fsm_eqc_util). --compile([export_all]). - --ifdef(EQC). - --include_lib("eqc/include/eqc.hrl"). --define(RING_KEY, riak_ring). - -not_empty(G) -> - ?SUCHTHAT(X, G, X /= [] andalso X /= <<>>). - -longer_list(K, G) -> - ?SIZED(Size, resize(trunc(K*Size), list(resize(Size, G)))). - -node_status() -> - frequency([{1, ?SHRINK(down, [up])}, - {9, up}]). - -%% Make sure at least one node is up - code in riak_kv_util makes -%% some assumptions that the node the get FSM is running on is -%% in the cluster causing problems if it isn't. -at_least_one_up(G) -> - ?SUCHTHAT(X, G, lists:member(up, X)). - -num_partitions() -> - %% TODO: use some unfortunate partition counts (1, 50, etc.) - % elements([4, 16, 64]). - ?LET(N, choose(0, 6), pow(2, N)). - -largenat() -> - ?LET(X, largeint(), abs(X)). - -bkey() -> - %%TODO: "make this nastier" - %%TODO: once json encoding of bkeys as binaries rather than utf8 strings - %% start creating general binaries instead - {non_blank_string(), %% bucket - non_blank_string()}. %% key - -non_blank_string() -> - ?LET(X,not_empty(list(lower_char())), list_to_binary(X)). - -%% Generate a lower 7-bit ACSII character that should not cause any problems -%% with utf8 conversion. -lower_char() -> - choose(16#20, 16#7f). - - -vclock() -> - ?LET(VclockSym, vclock_sym(), eval(VclockSym)). - -vclock_sym() -> - ?LAZY( - oneof([ - {call, vclock, fresh, []}, - ?LETSHRINK([Clock], [vclock_sym()], - {call, ?MODULE, increment, - [noshrink(binary(4)), nat(), Clock]}) - ])). - -increment(Actor, Count, Vclock) -> - lists:foldl( - fun vclock:increment/2, - Vclock, - lists:duplicate(Count, Actor)). - -riak_object() -> - ?LET({{Bucket, Key}, Vclock, Value}, - {bkey(), vclock(), binary()}, - riak_object:set_vclock( - riak_object:new(Bucket, Key, Value), - Vclock)). - -maybe_tombstone() -> - weighted_default({2, notombstone}, {1, tombstone}). - -%% -%% ancestor -%% / | \ -%% brother sister otherbrother -%% \ | / -%% current -%% -lineage() -> - elements([current, ancestor, brother, sister, otherbrother]). - -merge(ancestor, Lineage) -> Lineage; % order should match Clocks list in riak_objects -merge(Lineage, ancestor) -> Lineage; % as last modified is used as tie breaker with -merge(_, current) -> current; % allow_mult=false -merge(current, _) -> current; -merge(otherbrother, _) -> otherbrother; -merge(_, otherbrother) -> otherbrother; -merge(sister, _) -> sister; -merge(_, sister) -> sister; -merge(brother, _) -> brother; -merge(_, brother) -> brother. - -merge([Lin]) -> - Lin; -merge([Lin|Lins]) -> - merge(Lin, merge(Lins)). - -partval() -> - Shrink = fun(G) -> ?SHRINK(G, [{ok, current}]) end, - frequency([{2,{ok, lineage()}}, - {1,Shrink(notfound)}, - {1,Shrink(timeout)}, - {1,Shrink(error)}]). - -partvals() -> - not_empty(fsm_eqc_util:longer_list(2, partval())). 
- -%% Generate 5 riak objects with the same bkey -%% -riak_objects() -> - ?LET({{Bucket,Key},AncestorVclock0,Tombstones}, - {noshrink(bkey()),vclock(),vector(5, maybe_tombstone())}, - begin - AncestorVclock = vclock:increment(<<"dad">>, AncestorVclock0), - BrotherVclock = vclock:increment(<<"bro!">>, AncestorVclock), - OtherBroVclock = vclock:increment(<<"bro2">>, AncestorVclock), - SisterVclock = vclock:increment(<<"sis!">>, AncestorVclock), - CurrentVclock = vclock:merge([BrotherVclock,SisterVclock,OtherBroVclock]), - Clocks = [{ancestor, AncestorVclock, <<"ancestor">>}, - {brother, BrotherVclock, <<"brother">>}, - {sister, SisterVclock, <<"sister">>}, - {otherbrother, OtherBroVclock, <<"otherbrother">>}, - {current, CurrentVclock, <<"current">>}], - [ {Lineage, build_riak_obj(Bucket, Key, Vclock, Value, Tombstone)} - || {{Lineage, Vclock, Value}, Tombstone} <- lists:zip(Clocks, Tombstones) ] - end). - -build_riak_obj(B,K,Vc,Val,notombstone) -> - riak_object:set_contents( - riak_object:set_vclock( - riak_object:new(B,K,Val), - Vc), - [{dict:from_list([{<<"X-Riak-Last-Modified">>,now()}]), Val}]); -build_riak_obj(B,K,Vc,Val,tombstone) -> - Obj = build_riak_obj(B,K,Vc,Val,notombstone), - add_tombstone(Obj). - -add_tombstone(Obj) -> - [{M,V}] = riak_object:get_contents(Obj), - NewM = dict:store(<<"X-Riak-Deleted">>, true, M), - riak_object:set_contents(Obj, [{NewM, V}]). - - -some_up_node_status(NumNodes) -> - at_least_one_up(nodes_status(NumNodes)). - -nodes_status(NumNodes) -> - non_empty(longer_list(NumNodes, node_status())). - -pow(_, 0) -> 1; -pow(A, N) -> A * pow(A, N - 1). - -make_power_of_two(Q) -> make_power_of_two(Q, 1). - -make_power_of_two(Q, P) when P >= Q -> P; -make_power_of_two(Q, P) -> make_power_of_two(Q, P*2). - -cycle(N, Xs=[_|_]) when N >= 0 -> - cycle(Xs, N, Xs). - -cycle(_Zs, 0, _Xs) -> - []; -cycle(Zs, N, [X|Xs]) -> - [X|cycle(Zs, N - 1, Xs)]; -cycle(Zs, N, []) -> - cycle(Zs, N, Zs). - -start_mock_servers() -> - %% Start new core_vnode based EQC FSM test mock - case whereis(fsm_eqc_vnode) of - undefined -> ok; - Pid2 -> - unlink(Pid2), - exit(Pid2, shutdown), - riak_kv_test_util:wait_for_pid(Pid2) - end, - {ok, _Pid3} = fsm_eqc_vnode:start_link(), - application:load(riak_core), - application:start(crypto), - application:start(folsom), -<<<<<<< HEAD -======= - start_fake_get_put_monitor(), ->>>>>>> master - riak_core_stat_cache:start_link(), - riak_kv_stat:register_stats(), - riak_core_ring_events:start_link(), - riak_core_node_watcher_events:start_link(), - riak_core_node_watcher:start_link(), - riak_core_node_watcher:service_up(riak_kv, self()), - ok. - -cleanup_mock_servers() -> -<<<<<<< HEAD -======= - stop_fake_get_put_monitor(), ->>>>>>> master - application:stop(folsom), - application:stop(riak_core). - -make_options([], Options) -> - Options; -make_options([{_Name, missing} | Rest], Options) -> - make_options(Rest, Options); -make_options([Option | Rest], Options) -> - make_options(Rest, [Option | Options]). - -mock_ring(Q0, NodeStatus0) -> - %% Round up to next power of two - Q = fsm_eqc_util:make_power_of_two(Q0), - - %% Expand the node status to match the size of the ring - NodeStatus = cycle(Q, NodeStatus0), - - %% Assign the node owners and store the ring. - Ring = reassign_nodes(NodeStatus, riak_core_ring:fresh(Q, node())), - mochiglobal:put(?RING_KEY, Ring), - - %% Return details - useful for ?WHENFAILs - {Q, Ring, NodeStatus}. 
- -reassign_nodes(Status, Ring) -> - Ids = [ I || {I, _} <- riak_core_ring:all_owners(Ring) ], - lists:foldl( - fun({down, Id}, R) -> - riak_core_ring:transfer_node(Id, 'notanode@localhost', R); - (_, R) -> R - end, Ring, lists:zip(Status, Ids)). - - -wait_for_req_id(ReqId, Pid) -> - receive - {'EXIT', Pid, _Reason} -> - io:format(user, "FSM died:\n~p\n", [_Reason]), - %{exit, _Reason}; - %% Mark as timeout for now - no reply is coming, so why wait - timeout; - {'EXIT', _OtherPid, _Reason} -> - %% Probably from previous test death - wait_for_req_id(ReqId, Pid); - {ReqId, Response} -> - Response; - Anything1 -> - {anything, Anything1} - after 400 -> - timeout - end. - -start_fake_get_put_monitor() -> - Pid = spawn_link(?MODULE, fake_get_put_monitor, [undefined]), - case whereis(riak_kv_get_put_monitor) of - undefined -> - ok; - OldPid -> - unlink(OldPid), - exit(OldPid, shutdown), - riak_kv_test_util:wait_for_pid(OldPid) - end, - register(riak_kv_get_put_monitor, Pid), - {ok, Pid}. - -stop_fake_get_put_monitor() -> - case whereis(riak_kv_get_put_monitor) of - undefined -> - ok; - Pid -> - unlink(Pid), - exit(Pid, shutdown), - riak_kv_test_util:wait_for_pid(Pid) - end. - -fake_get_put_monitor(LastCast) -> - receive - {'$gen_call', From, last_cast} -> - gen_server:reply(From, LastCast), - fake_get_put_monitor(LastCast); - {'$gen_cast', stop} -> - ok; - {'$gen_cast', NewCast} -> - fake_get_put_monitor(NewCast); - _ -> - fake_get_put_monitor(LastCast) - end. - -is_get_put_last_cast(Type, Pid) -> - case gen_server:call(riak_kv_get_put_monitor, last_cast) of - {get_fsm_spawned, Pid} when Type == get -> - true; - {put_fsm_spawned, Pid} when Type == put -> - true; - _ -> - false - end. - -start_fake_rng(ProcessName) -> - Pid = spawn_link(?MODULE, fake_rng, [1]), - register(ProcessName, Pid), - {ok, Pid}. - -set_fake_rng(ProcessName, Val) -> - gen_server:cast(ProcessName, {set, Val}). - -get_fake_rng(ProcessName) -> - gen_server:call(ProcessName, get). - -fake_rng(N) -> - receive - {'$gen_call', From, get} -> - gen_server:reply(From, N), - fake_rng(N); - {'$gen_cast', {set, NewN}} -> - fake_rng(NewN) - end. - --endif. % EQC diff --git a/test/keys_fsm_eqc.erl.orig b/test/keys_fsm_eqc.erl.orig deleted file mode 100644 index 9fcec2779c..0000000000 --- a/test/keys_fsm_eqc.erl.orig +++ /dev/null @@ -1,221 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% keys_fsm_eqc: Quickcheck testing for the key listing fsm. -%% -%% Copyright (c) 2007-2011 Basho Technologies, Inc. All Rights Reserved. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- - --module(keys_fsm_eqc). - --ifdef(EQC). --include_lib("eqc/include/eqc.hrl"). --include_lib("eunit/include/eunit.hrl"). --include_lib("riak_kv_vnode.hrl"). - --import(fsm_eqc_util, [non_blank_string/0]). - --compile(export_all). - --define(TEST_ITERATIONS, 50). 
--define(QC_OUT(P), - eqc:on_output(fun(Str, Args) -> io:format(user, Str, Args) end, P)). - -%%==================================================================== -%% eunit test -%%==================================================================== - -eqc_test_() -> - {spawn, - [{setup, - riak_kv_test_util:common_setup(?MODULE, fun configure/1), - riak_kv_test_util:common_cleanup(?MODULE, fun configure/1), - [%% Run the quickcheck tests - {timeout, 60000, % timeout is in msec - ?_assertEqual(true, quickcheck(numtests(?TEST_ITERATIONS, ?QC_OUT(prop_basic_listkeys()))))} - ] - } - ] - }. - -%% Call unused callback functions to clear them in the coverage -%% checker so the real code stands out. -coverage_test() -> - riak_kv_test_util:call_unused_fsm_funs(riak_core_coverage_fsm). - -%% ==================================================================== -%% eqc property -%% ==================================================================== - -prop_basic_listkeys() -> - ?FORALL({ReqId, Bucket, KeyFilter, NVal, ObjectCount, Timeout}, - {g_reqid(), g_bucket(), g_key_filter(), g_n_val(), g_object_count(), g_timeout()}, - ?TRAPEXIT( - begin - riak_kv_memory_backend:reset(), - {ok, Client} = riak:local_client(), - BucketProps = riak_core_bucket:get_bucket(Bucket), - NewBucketProps = orddict:store(n_val, NVal, BucketProps), - riak_core_bucket:set_bucket(Bucket, NewBucketProps), - %% Create objects in bucket - GeneratedKeys = [list_to_binary(integer_to_list(X)) || X <- lists:seq(1, ObjectCount)], - [ok = Client:put(riak_object:new(Bucket, Key, <<"val">>)) || Key <- GeneratedKeys], - - %% Set the expected output based on if a - %% key filter is being used or not. - case KeyFilter of - none -> - ExpectedKeys = GeneratedKeys; - _ -> - ExpectedKeyFilter = - fun(K, Acc) -> - case KeyFilter(K) of - true -> - [K | Acc]; - false -> - Acc - end - end, - ExpectedKeys = lists:foldl(ExpectedKeyFilter, [], GeneratedKeys) - end, - %% Call start_link - Keys = start_link(ReqId, Bucket, KeyFilter, Timeout), - ?WHENFAIL( - begin - io:format("Bucket: ~p n_val: ~p ObjectCount: ~p KeyFilter: ~p~n", [Bucket, NVal, ObjectCount, KeyFilter]), - io:format("Expected Key Count: ~p Actual Key Count: ~p~n", - [length(ExpectedKeys), length(Keys)]), - io:format("Expected Keys: ~p~nActual Keys: ~p~n", - [ExpectedKeys, lists:sort(Keys)]) - end, - conjunction( - [ - {results, equals(lists:sort(Keys), lists:sort(ExpectedKeys))} - ])) - - end - )). - -%%==================================================================== -%% Wrappers -%%==================================================================== - -start_link(ReqId, Bucket, Filter, Timeout) -> - Sink = spawn(?MODULE, data_sink, [ReqId, [], false]), - From = {raw, ReqId, Sink}, - {ok, _FsmPid} = riak_core_coverage_fsm:start_link(riak_kv_keys_fsm, From, [Bucket, Filter, Timeout]), - wait_for_replies(Sink, ReqId). - -%%==================================================================== -%% Generators -%%==================================================================== - -g_bucket() -> - non_blank_string(). - -g_key_filter() -> - %% Create a key filter function. - %% There will always be at least 10 keys - %% due to the lower bound of object count - %% generator. - MatchKeys = [list_to_binary(integer_to_list(X)) || X <- lists:seq(1,10)], - KeyFilter = - fun(X) -> - lists:member(X, MatchKeys) - end, - frequency([{5, none}, {2, KeyFilter}]). - -g_n_val() -> - choose(1,5). - -g_object_count() -> - choose(10, 2000). - -g_reqid() -> - ?LET(X, noshrink(largeint()), abs(X)). 
- -g_timeout() -> - choose(10000, 60000). - -%%==================================================================== -%% Helpers -%%==================================================================== - -configure(load) -> - application:set_env(riak_kv, storage_backend, riak_kv_memory_backend), - application:set_env(riak_kv, test, true), - application:set_env(riak_kv, vnode_vclocks, true), -<<<<<<< HEAD - application:set_env(riak_kv, delete_mode, immediate); -======= - Out = application:set_env(riak_kv, delete_mode, immediate), - Out; ->>>>>>> master -configure(_) -> - ok. - -test() -> - test(100). - -test(N) -> - quickcheck(numtests(N, prop_basic_listkeys())). - -check() -> - check(prop_basic_listkeys(), current_counterexample()). - -data_sink(ReqId, KeyList, Done) -> - receive - {ReqId, From={_Pid,_Ref}, {keys, Keys}} -> - riak_kv_keys_fsm:ack_keys(From), - data_sink(ReqId, KeyList++Keys, false); - {ReqId, {keys, Keys}} -> - data_sink(ReqId, KeyList++Keys, false); - {ReqId, done} -> - data_sink(ReqId, KeyList, true); - {ReqId, Error} -> - ?debugFmt("Error occurred: ~p~n", [Error]), - data_sink(ReqId, [], true); - {keys, From, ReqId} -> - From ! {ok, ReqId, KeyList}; - {'done?', From, ReqId} -> - From ! {ok, ReqId, Done}, - data_sink(ReqId, KeyList, Done); - Other -> - ?debugFmt("Unexpected msg: ~p~n", [Other]), - data_sink(ReqId, KeyList, Done) - end. - -wait_for_replies(Sink, ReqId) -> - S = self(), - Sink ! {'done?', S, ReqId}, - receive - {ok, ReqId, true} -> - Sink ! {keys, S, ReqId}, - receive - {ok, ReqId, Keys} -> - Keys; - {ok, ORef, _} -> - ?debugFmt("Received keys for older run: ~p~n", [ORef]) - end; - {ok, ReqId, false} -> - timer:sleep(100), - wait_for_replies(Sink, ReqId); - {ok, ORef, _} -> - ?debugFmt("Received keys for older run: ~p~n", [ORef]) - end. - --endif. % EQC From 7574ed4db2fce220eecdd27d8f25ff755fdd50a0 Mon Sep 17 00:00:00 2001 From: ricardobcl Date: Fri, 14 Dec 2012 16:49:28 +0000 Subject: [PATCH 12/25] Deleted some commented files and updated some docs --- src/riak_kv_vnode.erl | 26 -------------------------- src/riak_object.erl | 12 ++++-------- 2 files changed, 4 insertions(+), 34 deletions(-) diff --git a/src/riak_kv_vnode.erl b/src/riak_kv_vnode.erl index 0a3db6666f..72d47a00bc 100644 --- a/src/riak_kv_vnode.erl +++ b/src/riak_kv_vnode.erl @@ -818,7 +818,6 @@ prepare_put(#state{vnodeid=VId, {oldobj, OldObj1} -> {{false, OldObj1}, PutArgs}; {newobj, NewObj} -> -% VC = riak_object:vclock(NewObj), AMObj = enforce_allow_mult(NewObj, BProps), case IndexBackend of true -> @@ -828,16 +827,6 @@ prepare_put(#state{vnodeid=VId, false -> IndexSpecs = [] end, -% case PruneTime of -% undefined -> -% ObjToStore = AMObj; -% _ -> -% ObjToStore = -% riak_object:set_vclock(AMObj, -% vclock:prune(VC, -% PruneTime, -% BProps)) -% end, {{true, AMObj}, PutArgs#putargs{index_specs=IndexSpecs, is_index=IndexBackend}} end @@ -909,7 +898,6 @@ put_merge(false, true, _CurObj, UpdObj, _VId, _StartTime) -> % coord=false, LWW= {newobj, UpdObj}; put_merge(false, false, CurObj, UpdObj, _VId, _StartTime) -> % coord=false, LWW=false ResObj = riak_object:syntactic_merge(CurObj, UpdObj), -% case ResObj =:= CurObj of case dottedvv:equal(riak_object:vclock(ResObj), riak_object:vclock(CurObj)) of true -> {oldobj, CurObj}; @@ -922,20 +910,6 @@ put_merge(true, false, CurObj, UpdObj, VId, _StartTime) -> UpdObj1 = riak_object:update_vclock(UpdObj, CurObj, VId), ResObj = riak_object:syntactic_merge(CurObj, UpdObj1), {newobj, ResObj}. 
-% UpdVC = riak_object:vclock(UpdObj1), -% CurVC = riak_object:vclock(CurObj), -% -% %% Check the coord put will replace the existing object -% case vclock:get_counter(VId, UpdVC) > vclock:get_counter(VId, CurVC) andalso -% vclock:descends(CurVC, UpdVC) == false andalso -% vclock:descends(UpdVC, CurVC) == true of -% true -> -% {newobj, UpdObj1}; -% false -> -% %% If not, make sure it does -% {newobj, riak_object:increment_vclock( -% riak_object:merge(CurObj, UpdObj1), VId, StartTime)} -% end. %% @private do_get(_Sender, BKey, ReqID, diff --git a/src/riak_object.erl b/src/riak_object.erl index 2d963f4992..9b8d139e40 100644 --- a/src/riak_object.erl +++ b/src/riak_object.erl @@ -132,8 +132,9 @@ equal_contents([C1|R1],[C2|R2]) -> -% @spec reconcile([riak_object()], boolean()) -> riak_object() -% @doc Reconcile a list of riak objects. If AllowMultiple is true, +% @spec reconcile([riak_object()], [riak_object()], boolean()) -> riak_object() +% @doc Reconcile the object from the client and the object from the server. +% If AllowMultiple is true, % the riak_object returned may contain multiple values if Objects % contains sibling versions (objects that could not be syntactically % merged). If AllowMultiple is false, the riak_object returned will @@ -163,7 +164,7 @@ reconcile(Current, New, AllowMultiple) -> updatevalue=undefined}. -%% @spec reconcile([riak_object()]) -> [riak_object()] +%% @spec reconcile([riak_object()], [riak_object()]) -> [riak_object()] reconcile_sync(Current, New) -> ClockNew = vclock(New), {Curr, ClocksCurrent} = @@ -333,11 +334,6 @@ update_vclock(ObjectC=#r_object{}, ObjectR=#r_object{}, Id) -> Dvv = dottedvv:update(vclock(ObjectC), vclock(ObjectR), Id), riak_object:set_vclock(ObjectC,Dvv). -%% @doc Increment the entry for ClientId in O's vclock. -%-spec increment_vclock(riak_object(), vclock:vclock_node(), vclock:timestamp()) -> riak_object(). -%increment_vclock(Object=#r_object{}, ClientId, Timestamp) -> -% Object#r_object{vclock=vclock:increment(ClientId, Timestamp, Object#r_object.vclock)}. - %% @doc Prepare a list of index specifications %% to pass to the backend. This function is for %% the case where there is no existing object From 70dd894130b8db999cc6fa811e3b55d0dd850df9 Mon Sep 17 00:00:00 2001 From: ricardobcl Date: Thu, 3 Jan 2013 12:25:41 +0000 Subject: [PATCH 13/25] Support for Compact Version of DVV. --- src/riak_kv_backup.erl | 6 +- src/riak_kv_delete.erl | 6 +- src/riak_kv_encoding_migrate.erl | 11 +- src/riak_kv_get_core.erl | 7 +- src/riak_kv_get_fsm.erl | 2 +- src/riak_kv_index_hashtree.erl | 4 +- src/riak_kv_pb_object.erl | 12 +- src/riak_kv_vnode.erl | 35 +-- src/riak_kv_wm_object.erl | 14 +- src/riak_kv_wm_utils.erl | 5 +- src/riak_object.erl | 385 +++++++++++++++++-------------- 11 files changed, 246 insertions(+), 241 deletions(-) diff --git a/src/riak_kv_backup.erl b/src/riak_kv_backup.erl index 0aeab87971..63d373394f 100644 --- a/src/riak_kv_backup.erl +++ b/src/riak_kv_backup.erl @@ -153,13 +153,11 @@ read_and_restore_function(Client, BinTerm) -> %% If the bucket name is an atom, convert it to a binary... make_binary_bucket(Bucket, Key, OriginalObj) when is_atom(Bucket) -> Bucket1 = list_to_binary(atom_to_list(Bucket)), - OriginalContents = riak_object:get_contents(OriginalObj), - OriginalVClock = riak_object:vclock(OriginalObj), + OriginalContents = riak_object:get_vclock(OriginalObj,true), % We can't change the bucket name without creating a new object... 
NewObj = riak_object:new(Bucket1, Key, placeholder), - NewObj1 = riak_object:set_contents(NewObj, OriginalContents), - _NewObj2 = riak_object:set_vclock(NewObj1, OriginalVClock); + _NewObj1 = riak_object:set_contents(NewObj, OriginalContents); %% If the bucket name is a binary, just pass it on through... make_binary_bucket(Bucket, _Key, Obj) when is_binary(Bucket) -> Obj. diff --git a/src/riak_kv_delete.erl b/src/riak_kv_delete.erl index 8d9a4cc96d..9ca13c223b 100644 --- a/src/riak_kv_delete.erl +++ b/src/riak_kv_delete.erl @@ -66,7 +66,7 @@ delete(ReqId,Bucket,Key,Options,Timeout,Client,ClientId,undefined) -> case C:get(Bucket,Key,[{r,R},{pr,PR},{timeout,Timeout}]) of {ok, OrigObj} -> RemainingTime = Timeout - (riak_core_util:moment() - RealStartTime), - delete(ReqId,Bucket,Key,Options,RemainingTime,Client,ClientId,riak_object:vclock(OrigObj)); + delete(ReqId,Bucket,Key,Options,RemainingTime,Client,ClientId,riak_object:get_vclocks(OrigObj, false)); {error, notfound} -> ?DTRACE(?C_DELETE_INIT1, [-2], []), Client ! {ReqId, {error, notfound}}; @@ -240,7 +240,7 @@ invalid_w_delete() -> Key = <<"testkey">>, Timeout = 60000, riak_kv_delete_sup:start_delete(node(), [RequestId, Bucket, Key, [{w,W}], - Timeout, self(), undefined, vclock:fresh()]), + Timeout, self(), undefined, riak_object:new_vclock()]), %% Wait for error response receive {_RequestId, Result} -> @@ -275,7 +275,7 @@ invalid_pw_delete() -> Key = <<"testkey">>, Timeout = 60000, riak_kv_delete_sup:start_delete(node(), [RequestId, Bucket, Key, - [{pw,PW}], Timeout, self(), undefined, vclock:fresh()]), + [{pw,PW}], Timeout, self(), undefined, riak_object:new_vclock()]), %% Wait for error response receive {_RequestId, Result} -> diff --git a/src/riak_kv_encoding_migrate.erl b/src/riak_kv_encoding_migrate.erl index 2fa179781c..782b3a1feb 100644 --- a/src/riak_kv_encoding_migrate.erl +++ b/src/riak_kv_encoding_migrate.erl @@ -248,9 +248,8 @@ decode_object(RO) -> copy_object(RO, B, K) -> {ok, RC} = riak:local_client(), NO1 = riak_object:new(B, K, <<>>), - NO2 = riak_object:set_vclock(NO1, riak_object:vclock(RO)), - NO3 = riak_object:set_contents(NO2, riak_object:get_contents(RO)), - RC:put(NO3). + NO2 = riak_object:set_contents(NO1, riak_object:get_vclock(RO,true)), + RC:put(NO2). 
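
With dotted version vectors the clock travels inside the contents, so the
copy_object/3 change above can move both the values and their causal history
in one get_vclock(RO, true) / set_contents/2 pair and drop the separate
set_vclock/2 step. A minimal sketch of the idiom, reusing only accessors this
series introduces (copy_sketch and copy/3 are hypothetical helpers, not part
of the patch):

-module(copy_sketch).
-export([copy/3]).

copy(RO, B, K) ->
    %% get_vclock(Obj, true) returns the full clock, values included;
    %% set_contents/2 installs it unchanged on the fresh object.
    Fresh = riak_object:new(B, K, placeholder),
    riak_object:set_contents(Fresh, riak_object:get_vclock(RO, true)).
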
%% Force writes to fail to test failure behavior precommit_fail(_) -> @@ -309,18 +308,16 @@ test_migration() -> {not_needed, [], []} = riak_kv_encoding_migrate:check_cluster(), C1 = riak_object:get_contents(O2), - V1 = riak_object:vclock(O2), C2 = riak_object:get_contents(O4), - V2 = riak_object:vclock(O4), {ok, MO1} = RC:get(<<"me@mine">>, <<"key">>), nearly_equal_contents(C1, riak_object:get_contents(MO1)), - true = vclock:descends(riak_object:vclock(MO1), V1), + true = riak_object:descendant(MO1, O2), {ok, MO2} = RC:get(<<"bucket">>, <<"key@">>), nearly_equal_contents(C2, riak_object:get_contents(MO2)), - true = vclock:descends(riak_object:vclock(MO2), V2), + true = riak_object:descendant(MO2, O4), %% Use precommit hook to test failure scenarios O7 = riak_object:new(<<"fail">>, <<"key%40">>, <<"value">>), diff --git a/src/riak_kv_get_core.erl b/src/riak_kv_get_core.erl index 91929d5df1..734289ea92 100644 --- a/src/riak_kv_get_core.erl +++ b/src/riak_kv_get_core.erl @@ -118,7 +118,7 @@ response(GetCore = #getcore{r = R, num_ok = NumOk, num_notfound = NumNotFound, ok -> Merged; % {ok, MObj} tombstone when DeletedVClock -> - {error, {deleted, riak_object:vclock(MObj)}}; + {error, {deleted, riak_object:get_vclock(MObj,false)}}; _ -> % tombstone or notfound {error, notfound} end; @@ -160,7 +160,7 @@ final_action(GetCore = #getcore{n = N, merged = Merged0, results = Results, []; _ -> % ok or tombstone [{Idx, outofdate} || {Idx, {ok, RObj}} <- Results, - strict_descendant(MObj, RObj)] ++ + riak_object:strict_descendant(MObj, RObj)] ++ [{Idx, notfound} || {Idx, {error, notfound}} <- Results] end, Action = case ReadRepairs of @@ -203,9 +203,6 @@ info(#getcore{num_ok = NumOks, num_fail = NumFail, results = Results}) -> %% Internal functions %% ==================================================================== -strict_descendant(O1, O2) -> - dottedvv:strict_descends(riak_object:vclock(O1),riak_object:vclock(O2)). - merge(Replies, AllowMult) -> RObjs = [RObj || {_I, {ok, RObj}} <- Replies], case RObjs of diff --git a/src/riak_kv_get_fsm.erl b/src/riak_kv_get_fsm.erl index b50a13c63a..745278543e 100644 --- a/src/riak_kv_get_fsm.erl +++ b/src/riak_kv_get_fsm.erl @@ -469,7 +469,7 @@ calculate_objsize(Bucket, Obj) -> Contents = riak_object:get_contents(Obj), size(Bucket) + size(riak_object:key(Obj)) + - size(term_to_binary(riak_object:vclock(Obj))) + + size(term_to_binary(riak_object:get_vclock(Obj,false))) + lists:sum([size(term_to_binary(MD)) + value_size(Value) || {MD, Value} <- Contents]). value_size(Value) when is_binary(Value) -> size(Value); diff --git a/src/riak_kv_index_hashtree.erl b/src/riak_kv_index_hashtree.erl index fb177d899f..a7b9ab59c7 100644 --- a/src/riak_kv_index_hashtree.erl +++ b/src/riak_kv_index_hashtree.erl @@ -360,8 +360,8 @@ load_built(#state{trees=Trees}) -> hash_object(RObjBin) -> %% Normalize the `riak_object' vector clock before hashing RObj = binary_to_term(RObjBin), - Vclock = riak_object:vclock(RObj), - UpdObj = riak_object:set_vclock(RObj, lists:sort(Vclock)), + Vclock = riak_object:get_vclock(RObj,true), + UpdObj = riak_object:set_contents(RObj, lists:sort(Vclock)), Hash = erlang:phash2(term_to_binary(UpdObj)), term_to_binary(Hash). 
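
The hash_object/1 hunk above keeps the existing normalization step: clock
entries are sorted before hashing so that replicas holding the same causal
information in different entry orders produce the same AAE hash. The
invariant, reduced to the [{Id, Counter, Values}] clock shape this series
uses elsewhere (hash_norm_sketch is illustrative, not the real hashtree
code):

-module(hash_norm_sketch).
-export([demo/0]).

%% Order-insensitive hash over a clock in [{Id, Counter, Values}] form.
norm_hash(Clock) ->
    erlang:phash2(term_to_binary(lists:sort(Clock))).

demo() ->
    A = [{a, 2, [v1]}, {b, 1, [v2]}],
    B = [{b, 1, [v2]}, {a, 2, [v1]}],   %% same entries, different order
    true = (norm_hash(A) =:= norm_hash(B)),
    ok.
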
diff --git a/src/riak_kv_pb_object.erl b/src/riak_kv_pb_object.erl index e92b7762a4..cdb1f5bfff 100644 --- a/src/riak_kv_pb_object.erl +++ b/src/riak_kv_pb_object.erl @@ -111,7 +111,7 @@ process(#rpbgetreq{bucket=B, key=K, r=R0, pr=PR0, notfound_ok=NFOk, make_option(notfound_ok, NFOk) ++ make_option(basic_quorum, BQ)) of {ok, O} -> - case erlify_rpbvc(VClock) == riak_object:vclock(O) of + case riak_object:equal_vclock(erlify_rpbvc(VClock),riak_object:get_vclock(O,false)) of true -> {reply, #rpbgetresp{unchanged = true}, State}; _ -> @@ -127,7 +127,7 @@ process(#rpbgetreq{bucket=B, key=K, r=R0, pr=PR0, notfound_ok=NFOk, riak_pb_kv_codec:encode_contents(Contents) end, {reply, #rpbgetresp{content = PbContent, - vclock = pbify_rpbvc(riak_object:vclock(O))}, State} + vclock = pbify_rpbvc(riak_object:get_vclock(O,false))}, State} end; {error, {deleted, TombstoneVClock}} -> %% Found a tombstone - return its vector clock so it can @@ -146,7 +146,7 @@ process(#rpbputreq{bucket=B, key=K, vclock=PbVC, {ok, _} when NoneMatch -> {error, "match_found", State}; {ok, O} when NotMod -> - case erlify_rpbvc(PbVC) == riak_object:vclock(O) of + case erlify_rpbvc(PbVC) == riak_object:get_vclock(O,false) of true -> process(Req#rpbputreq{if_not_modified=undefined, if_none_match=undefined}, @@ -215,7 +215,7 @@ process(#rpbputreq{bucket=B, key=K, vclock=PbVC, content=RpbContent, riak_pb_kv_codec:encode_contents(Contents) end, PutResp = #rpbputresp{content = PbContents, - vclock = pbify_rpbvc(riak_object:vclock(Obj)), + vclock = pbify_rpbvc(riak_object:get_vclock(Obj,false)), key = ReturnKey }, {reply, PutResp, State}; @@ -289,9 +289,9 @@ make_option(K, V) -> %% Convert a vector clock to erlang erlify_rpbvc(undefined) -> - vclock:fresh(); + riak_object:new_vclock(); erlify_rpbvc(<<>>) -> - vclock:fresh(); + riak_object:new_vclock(); erlify_rpbvc(PbVc) -> binary_to_term(zlib:unzip(PbVc)). diff --git a/src/riak_kv_vnode.erl b/src/riak_kv_vnode.erl index 72d47a00bc..b3a18baed1 100644 --- a/src/riak_kv_vnode.erl +++ b/src/riak_kv_vnode.erl @@ -872,33 +872,19 @@ enforce_allow_mult(Obj, BProps) -> case proplists:get_value(allow_mult, BProps) of true -> Obj; _ -> - case riak_object:get_contents(Obj) of - [_] -> Obj; - Mult -> - Clocks = [C || {_,_,C} <- Mult], - Clock = dottedvv:merge(Clocks), - {MD, V, _VC} = select_newest_content(Mult), - riak_object:set_contents(Obj, [{MD, V, Clock}]) + case riak_object:value_count(Obj) of + 1 -> Obj; + _ -> + riak_object:set_lww(Obj) end end. -%% @private -%% choose the latest content to store for the allow_mult=false case -select_newest_content(Mult) -> - hd(lists:sort( - fun({MD0, _, _}, {MD1, _, _}) -> - riak_core_util:compare_dates( - dict:fetch(<<"X-Riak-Last-Modified">>, MD0), - dict:fetch(<<"X-Riak-Last-Modified">>, MD1)) - end, - Mult)). 
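
In the hunk above, enforce_allow_mult/2 no longer picks the newest sibling
itself: select_newest_content/1 moves into riak_object as set_lww/1. The
selection rule is unchanged, keep the sibling with the newest
X-Riak-Last-Modified. Stripped of riak specifics it reduces to the following
(lww_sketch and its {Timestamp, Value} pairs are illustrative assumptions;
the real code compares RFC1123 dates via riak_core_util:compare_dates/2):

-module(lww_sketch).
-export([pick_newest/1]).

%% Siblings :: [{Timestamp, Value}], with timestamps that follow the
%% default term order (e.g. erlang:timestamp() tuples).
pick_newest(Siblings) ->
    hd(lists:sort(fun({T1, _}, {T2, _}) -> T1 >= T2 end, Siblings)).
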
-
 %% @private
 put_merge(false, true, _CurObj, UpdObj, _VId, _StartTime) -> % coord=false, LWW=true
     {newobj, UpdObj};
 put_merge(false, false, CurObj, UpdObj, _VId, _StartTime) -> % coord=false, LWW=false
     ResObj = riak_object:syntactic_merge(CurObj, UpdObj),
-    case dottedvv:equal(riak_object:vclock(ResObj), riak_object:vclock(CurObj)) of
+    case riak_object:equal_vclock(ResObj,CurObj) of
         true ->
             {oldobj, CurObj};
         false ->
@@ -906,9 +892,10 @@ put_merge(false, false, CurObj, UpdObj, _VId, _StartTime) -> % coord=false, LWW=
     end;
 put_merge(true, true, _CurObj, UpdObj, VId, StartTime) -> % coord=false, LWW=true
     {newobj, riak_object:increment_vclock(UpdObj, VId, StartTime)};
-put_merge(true, false, CurObj, UpdObj, VId, _StartTime) ->
-    UpdObj1 = riak_object:update_vclock(UpdObj, CurObj, VId),
-    ResObj = riak_object:syntactic_merge(CurObj, UpdObj1),
+put_merge(true, false, CurObj, UpdObj, VId, StartTime) ->
+    CurObj1 = riak_object:update(CurObj),
+    UpdObj1 = riak_object:update(UpdObj),
+    ResObj = riak_object:update_vclock(UpdObj1, CurObj1, VId, StartTime),
     {newobj, ResObj}.
 
 %% @private
@@ -1092,8 +1079,8 @@ do_get_vclocks(KeyList,_State=#state{mod=Mod,modstate=ModState}) ->
 %% @private
 do_get_vclock({Bucket, Key}, Mod, ModState) ->
     case Mod:get(Bucket, Key, ModState) of
-        {error, not_found, _UpdModState} -> vclock:fresh();
-        {ok, Val, _UpdModState} -> riak_object:vclock(binary_to_term(Val))
+        {error, not_found, _UpdModState} -> riak_object:new_vclock();
+        {ok, Val, _UpdModState} -> riak_object:get_vclock(binary_to_term(Val),false)
    end.
 
 %% @private
diff --git a/src/riak_kv_wm_object.erl b/src/riak_kv_wm_object.erl
index cefe076efe..f30f34543c 100644
--- a/src/riak_kv_wm_object.erl
+++ b/src/riak_kv_wm_object.erl
@@ -775,16 +775,14 @@ select_doc(#ctx{doc={ok, Doc}, vtag=Vtag}) ->
         case riak_object:get_update_value(Doc) of
             undefined ->
                 case riak_object:get_contents(Doc) of
-                    [Single] -> {MD,V,_Clock} = Single,
-                                {MD,V};
+                    [Single] -> Single;
                     Mult ->
                         case lists:dropwhile(
-                               fun({M,_,_}) ->
+                               fun({M,_}) ->
                                        dict:fetch(?MD_VTAG, M) /= Vtag
                                end,
                                Mult) of
-                            [Match|_] -> {MD,V,_Clock} = Match,
-                                         {MD,V};
+                            [Match|_] -> Match;
                             [] -> multiple_choices
                         end
                 end;
@@ -806,7 +804,7 @@ encode_vclock_header(RD, #ctx{doc={error, {deleted, VClock}}}) ->
 %%      into something suitable for an HTTP header
 vclock_header(Doc) ->
     {?HEAD_VCLOCK,
-     encode_vclock(riak_object:vclock(Doc))}.
+     encode_vclock(riak_object:get_vclock(Doc, false))}.
 
 encode_vclock(VClock) ->
     binary_to_list(base64:encode(zlib:zip(term_to_binary(VClock)))).
 
@@ -817,7 +815,7 @@ encode_vclock(VClock) ->
 %%      vclock is returned.
 decode_vclock_header(RD) ->
     case wrq:get_req_header(?HEAD_VCLOCK, RD) of
-        undefined -> dottedvv:fresh();
+        undefined -> riak_object:new_vclock();
         Head -> binary_to_term(zlib:unzip(base64:decode(Head)))
     end.
 
@@ -866,7 +864,7 @@ generate_etag(RD, Ctx) ->
             {dict:fetch(?MD_VTAG, MD), RD, Ctx};
         multiple_choices ->
             {ok, Doc} = Ctx#ctx.doc,
-            <<ETag:128/integer>> = crypto:md5(term_to_binary(riak_object:vclock(Doc))),
+            <<ETag:128/integer>> = crypto:md5(term_to_binary(riak_object:get_vclock(Doc,false))),
             {riak_core_util:integer_to_list(ETag, 62), RD, Ctx}
     end.
 
diff --git a/src/riak_kv_wm_utils.erl b/src/riak_kv_wm_utils.erl
index c5459b43e1..5b4e185490 100644
--- a/src/riak_kv_wm_utils.erl
+++ b/src/riak_kv_wm_utils.erl
@@ -92,7 +92,7 @@ default_encodings() ->
 %% @spec multipart_encode_body(string(), binary(), {dict(), binary()}) -> iolist()
 %% @doc Produce one part of a multipart body, representing one sibling
 %%      of a multi-valued document.
-multipart_encode_body(Prefix, Bucket, {MD, V, Clock}, APIVersion) -> +multipart_encode_body(Prefix, Bucket, {MD, V}, APIVersion) -> Links1 = case dict:find(?MD_LINKS, MD) of {ok, Ls} -> Ls; error -> [] @@ -122,7 +122,6 @@ multipart_encode_body(Prefix, Bucket, {MD, V, Clock}, APIVersion) -> Rfc1123 end, "\r\n", - "VClock: ",encode_vclock(Clock),"\r\n", case dict:find(?MD_DELETED, MD) of {ok, "true"} -> [?HEAD_DELETED, ": true\r\n"]; @@ -150,7 +149,7 @@ multipart_encode_body(Prefix, Bucket, {MD, V, Clock}, APIVersion) -> %% into something suitable for an HTTP header vclock_header(Doc) -> {?HEAD_VCLOCK, - encode_vclock(riak_object:vclock(Doc))}. + encode_vclock(riak_object:get_vclock(Doc,false))}. encode_vclock(VClock) -> binary_to_list(base64:encode(zlib:zip(term_to_binary(VClock)))). diff --git a/src/riak_object.erl b/src/riak_object.erl index 9b8d139e40..241177e618 100644 --- a/src/riak_object.erl +++ b/src/riak_object.erl @@ -32,36 +32,37 @@ -type key() :: binary(). -type bucket() :: binary(). -%% -type bkey() :: {bucket(), key()}. -type value() :: term(). -record(r_content, { metadata :: dict(), - value :: term(), - dvvclock :: dottedvv:dottedvv() + value :: term() }). %% Opaque container for Riak objects, a.k.a. riak_object() -record(r_object, { bucket :: bucket(), key :: key(), - contents :: [#r_content{}], + contents :: compactdvv:clock(), % [{id, count, [r_content]}] updatemetadata=dict:store(clean, true, dict:new()) :: dict(), updatevalue :: term() }). + -opaque riak_object() :: #r_object{}. +-opaque riak_content() :: #r_content{}. -type index_op() :: add | remove. -type index_value() :: integer() | binary(). -define(MAX_KEY_SIZE, 65536). --export([new/3, new/4, ensure_robject/1, equal/2, reconcile/3, reconcile/2]). --export([increment_vclock/2, increment_vclock/3, update_vclock/3]). --export([key/1, get_metadata/1, get_metadatas/1, get_values/1, get_value/1]). --export([vclock/1, update_value/2, update_metadata/2, bucket/1, value_count/1]). +-export([new/3, new/4, ensure_robject/1, equal/2, new_vclock/0, equal_vclock/2]). +-export([increment_vclock/2, increment_vclock/3, update_vclock/4, update_vclock/3]). +-export([reconcile/2, descendant/2, strict_descendant/2, key/1]). +-export([get_metadata/1, get_metadatas/1, get_values/1, get_value/1]). +-export([get_vclock/2, update_value/2, update_metadata/2, bucket/1, value_count/1]). -export([get_update_metadata/1, get_update_value/1, get_contents/1]). --export([apply_updates/1, syntactic_merge/2]). +-export([apply_updates/1, syntactic_merge/2, compare_content_dates/2, set_lww/1]). -export([to_json/1, from_json/1]). -export([index_specs/1, diff_index_specs/2]). -export([set_contents/2, set_vclock/2]). %% INTERNAL, only for riak_* @@ -88,26 +89,41 @@ new(B, K, V, MD) when is_binary(B), is_binary(K) -> false -> case MD of no_initial_metadata -> - Contents = [#r_content{metadata=dict:new(), value=V, dvvclock = dottedvv:fresh()}], + Contents = compactdvv:new([#r_content{metadata=dict:new(), value=V}]), #r_object{bucket=B,key=K, contents=Contents}; _ -> - Contents = [#r_content{metadata=MD, value=V, dvvclock = dottedvv:fresh()}], + Contents = compactdvv:new([#r_content{metadata=MD, value=V}]), #r_object{bucket=B,key=K,updatemetadata=MD, contents=Contents} end end. +-spec new_vclock() -> compactdvv:clock(). +new_vclock() -> compactdvv:new(). + %% Ensure the incoming term is a riak_object. -spec ensure_robject(any()) -> riak_object(). ensure_robject(Obj = #r_object{}) -> Obj. --spec equal(riak_object(), riak_object()) -> true | false. 
+-spec strict_descendant(riak_object(), riak_object()) -> boolean(). +strict_descendant(#r_object{contents=C1},#r_object{contents=C2}) -> + compactdvv:strict_descendant(C1,C2). + +-spec descendant(riak_object(), riak_object()) -> boolean(). +descendant(#r_object{contents=C1},#r_object{contents=C2}) -> + compactdvv:equal(C1,C2) orelse compactdvv:strict_descendant(C1,C2). + +-spec equal_vclock(riak_object(), riak_object()) -> boolean(). +equal_vclock(#r_object{contents=C1},#r_object{contents=C2}) -> + compactdvv:equal(C1,C2). + +-spec equal(riak_object(), riak_object()) -> boolean(). %% @doc Deep (expensive) comparison of Riak objects. equal(Obj1,Obj2) -> (Obj1#r_object.bucket =:= Obj2#r_object.bucket) andalso (Obj1#r_object.key =:= Obj2#r_object.key) - andalso dottedvv:equal(vclock(Obj1),vclock(Obj2)) + andalso equal_vclock(Obj1,Obj2) andalso equal2(Obj1,Obj2). equal2(Obj1,Obj2) -> UM1 = lists:keysort(1, dict:to_list(Obj1#r_object.updatemetadata)), @@ -115,8 +131,8 @@ equal2(Obj1,Obj2) -> (UM1 =:= UM2) andalso (Obj1#r_object.updatevalue =:= Obj2#r_object.updatevalue) andalso begin - Cont1 = lists:sort(Obj1#r_object.contents), - Cont2 = lists:sort(Obj2#r_object.contents), + Cont1 = lists:sort(get_contents(Obj1)), + Cont2 = lists:sort(get_contents(Obj2)), equal_contents(Cont1,Cont2) end. equal_contents([],[]) -> true; @@ -127,12 +143,10 @@ equal_contents([C1|R1],[C2|R2]) -> MD2 = lists:keysort(1, dict:to_list(C2#r_content.metadata)), (MD1 =:= MD2) andalso (C1#r_content.value =:= C2#r_content.value) - andalso dottedvv:equal(C1#r_content.dvvclock,C1#r_content.dvvclock) andalso equal_contents(R1,R2). -% @spec reconcile([riak_object()], [riak_object()], boolean()) -> riak_object() % @doc Reconcile the object from the client and the object from the server. % If AllowMultiple is true, % the riak_object returned may contain multiple values if Objects @@ -140,67 +154,53 @@ equal_contents([C1|R1],[C2|R2]) -> % merged). If AllowMultiple is false, the riak_object returned will % contain the value of the most-recently-updated object, as per the % X-Riak-Last-Modified header. -reconcile(Objs, AllowMultiple) -> reconcile(Objs, [], AllowMultiple). -reconcile(Current, New, AllowMultiple) -> - RObjs = reconcile_sync(Current, New), - AllContents = lists:flatten([O#r_object.contents || O <- RObjs]), +-spec reconcile([riak_object()], boolean()) -> riak_object(). +reconcile(RObjs, AllowMultiple) -> + AllContents = [O#r_object.contents || O <- RObjs], + SyncedContents = compactdvv:sync(AllContents), Contents = case AllowMultiple of - false -> - Cont = most_recent_content(AllContents), - case length(AllContents) of - 1 -> [Cont]; - _ -> AllClocks = lists:flatten([vclock(O) || O <- RObjs]), - M = Cont#r_content.metadata, - V = Cont#r_content.value, - C = dottedvv:merge(AllClocks), - [#r_content{metadata=M, value=V, dvvclock=C}] - end; - true -> - lists:usort(AllContents) + false -> compactdvv:lww(SyncedContents); + true -> SyncedContents end, HdObj = hd(RObjs), HdObj#r_object{contents=Contents, updatemetadata=dict:store(clean, true, dict:new()), updatevalue=undefined}. +-spec syntactic_merge(riak_object(), riak_object()) -> riak_object(). +syntactic_merge(CurrentObj, NewObj) -> + %% Paranoia in case objects were incorrectly stored + %% with update information. Vclock is not updated + %% but since no data is lost the objects will be + %% fixed if rewritten. + UCurr = update(CurrentObj), + UNew = update(NewObj), + reconcile([UNew, UCurr], true). 
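
reconcile/2 above delegates the causal work to compactdvv:sync/1: versions
whose clocks are dominated disappear, concurrent ones survive as siblings.
The effect can be modelled with plain version vectors (sync_sketch is an
illustration; the real compactdvv internals are not shown in this series):

-module(sync_sketch).
-export([sync/1]).

%% true if clock A has seen everything clock B has seen
descends(A, B) ->
    lists:all(fun({Id, Cb}) ->
                      case lists:keyfind(Id, 1, A) of
                          {Id, Ca} -> Ca >= Cb;
                          false    -> false
                      end
              end, B).

%% Keep only versions that no other version strictly dominates.
sync(Versions) ->   %% Versions :: [{Clock, Value}]
    [V || {Ca, _} = V <- Versions,
          not lists:any(fun({Cb, _}) ->
                                descends(Cb, Ca) andalso
                                    not descends(Ca, Cb)
                        end, Versions)].

For instance, sync([{[{a,1}], old}, {[{a,2}], new}]) keeps only new, while
the concurrent clocks [{a,1}] and [{b,1}] both survive as siblings.
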
-%% @spec reconcile([riak_object()], [riak_object()]) -> [riak_object()] -reconcile_sync(Current, New) -> - ClockNew = vclock(New), - {Curr, ClocksCurrent} = - case is_list(Current) of - true -> {Current, lists:flatten([vclock(O) || O <- Current])}; - false -> {[Current], vclock(Current)} - end, - SyncClocks = dottedvv:sync(ClocksCurrent, ClockNew), - AllObjs = Curr ++ [New], - Objs = - [[Obj || Obj <- AllObjs, (dottedvv:descends(vclock(Obj), C))] - || C <- SyncClocks], - remove_duplicate_objects(lists:flatten(Objs)). - -remove_duplicate_objects(Os) -> rem_dup_objs(Os,[]). -rem_dup_objs([],Acc) -> Acc; -rem_dup_objs([O|Rest],Acc) -> - EqO = [AO || AO <- Acc, riak_object:equal(AO,O) =:= true], - case EqO of - [] -> rem_dup_objs(Rest,[O|Acc]); - _ -> rem_dup_objs(Rest,Acc) - end. -most_recent_content(AllContents) -> - hd(lists:sort(fun compare_content_dates/2, AllContents)). +-spec set_lww(riak_object()) -> riak_object(). +set_lww(Object=#r_object{contents=C}) -> + set_contents(Object,most_recent_content(C)). + +-spec most_recent_content(riak_content()) -> riak_content(). +most_recent_content(Contents) -> + compactdvv:lww(fun riak_object:compare_content_dates/2, Contents). + +-spec compare_content_dates(riak_content(), riak_content()) -> boolean(). +compare_content_dates(C1, C2) -> + compare_metadata(C1#r_content.metadata,C2#r_content.metadata). -compare_content_dates(C1,C2) -> - D1 = dict:fetch(<<"X-Riak-Last-Modified">>, C1#r_content.metadata), - D2 = dict:fetch(<<"X-Riak-Last-Modified">>, C2#r_content.metadata), +-spec compare_metadata(dict(), dict()) -> boolean(). +compare_metadata(MD1,MD2) -> + D1 = dict:fetch(<<"X-Riak-Last-Modified">>, MD1), + D2 = dict:fetch(<<"X-Riak-Last-Modified">>, MD2), %% true if C1 was modifed later than C2 Cmp1 = riak_core_util:compare_dates(D1, D2), %% true if C2 was modifed later than C1 Cmp2 = riak_core_util:compare_dates(D2, D1), %% check for deleted objects - Del1 = dict:is_key(<<"X-Riak-Deleted">>, C1#r_content.metadata), - Del2 = dict:is_key(<<"X-Riak-Deleted">>, C2#r_content.metadata), + Del1 = dict:is_key(<<"X-Riak-Deleted">>, MD1), + Del2 = dict:is_key(<<"X-Riak-Deleted">>, MD2), SameDate = (Cmp1 =:= Cmp2), case {SameDate, Del1, Del2} of @@ -213,126 +213,159 @@ compare_content_dates(C1,C2) -> _ -> %% Dates equal and either both present or both deleted, compare %% by opaque contents. - C1 < C2 + MD1 < MD2 end. -% @spec apply_updates(riak_object()) -> riak_object() + +-spec is_updated(riak_object()) -> boolean(). +is_updated(_Object=#r_object{updatemetadata=M,updatevalue=V}) -> + case dict:find(clean, M) of + error -> true; + {ok,_} -> + case V of + undefined -> false; + _ -> true + end + end. + +-spec update(riak_object()) -> riak_object(). +update(Obj) -> + case is_updated(Obj) of + true -> apply_updates(Obj); + false -> Obj + end. + + % @doc Promote pending updates (made with the update_value() and % update_metadata() calls) to this riak_object. +-spec apply_updates(riak_object()) -> riak_object(). 
apply_updates(Object=#r_object{}) -> - VL = case Object#r_object.updatevalue of + C = case Object#r_object.updatevalue of undefined -> - [{C#r_content.value, C#r_content.dvvclock} || C <- Object#r_object.contents]; + get_vclock(Object,true); _ -> - Clocks = vclock(Object), - Clock = dottedvv:merge(Clocks), - [{Object#r_object.updatevalue, Clock}] + MD = hd(get_metadatas(Object)), + VL = Object#r_object.updatevalue, + Rcont = #r_content{metadata=MD,value=VL}, + compactdvv:set_value(get_vclock(Object,false),Rcont) end, - MD = case dict:find(clean, Object#r_object.updatemetadata) of - {ok,_} -> - MDs = [C#r_content.metadata || C <- Object#r_object.contents], - case Object#r_object.updatevalue of - undefined -> MDs; - _ -> [hd(MDs)] - end; - error -> - [dict:erase(clean,Object#r_object.updatemetadata) || _X <- VL] - end, - Contents = [#r_content{metadata=M,value=V,dvvclock=C} || {M,{V,C}} <- lists:zip(MD, VL)], - Object#r_object{contents=Contents, + case dict:find(clean, Object#r_object.updatemetadata) of + error -> + [dict:erase(clean,Object#r_object.updatemetadata) || _X <- lists:seq(1,compactdvv:value_count(C))] + end, + Object#r_object{contents=C, updatemetadata=dict:store(clean, true, dict:new()), updatevalue=undefined}. -%% @spec bucket(riak_object()) -> bucket() %% @doc Return the containing bucket for this riak_object. +-spec bucket(riak_object()) -> bucket(). bucket(#r_object{bucket=Bucket}) -> Bucket. -%% @spec key(riak_object()) -> key() %% @doc Return the key for this riak_object. +-spec key(riak_object()) -> key(). key(#r_object{key=Key}) -> Key. -%% @spec vclock(riak_object()) -> [dottedvv:dottedvv()] -%% @doc Return the dotted version vector(s) for this riak_object. -vclock([]) -> {}; -vclock(#r_object{contents=C}) -> [Content#r_content.dvvclock || Content <- C]. +%% @doc Return the logical clock for this riak_object. +-spec get_vclock(riak_object(), boolean()) -> compactdvv:clock(). +get_vclock(#r_object{contents=Clock}, _WithContents = true) -> Clock; +get_vclock(#r_object{contents=Clock}, _WithContents = false) -> compactdvv:join(Clock). -%% @spec value_count(riak_object()) -> non_neg_integer() %% @doc Return the number of values (siblings) of this riak_object. -value_count(#r_object{contents=Contents}) -> length(Contents). +-spec value_count(riak_object()) -> non_neg_integer(). +value_count(#r_object{contents=Contents}) -> compactdvv:value_count(Contents). -%% @spec get_contents(riak_object()) -> [{dict(), value()}] %% @doc Return the contents (a list of {metadata, value} tuples) for %% this riak_object. -get_contents(#r_object{contents=Contents}) -> - [{Content#r_content.metadata, Content#r_content.value, Content#r_content.dvvclock} || - Content <- Contents]. +-spec get_contents(riak_object()) -> [riak_content()]. +get_contents(#r_object{contents=Contents}) -> compactdvv:get_values(Contents). -%% @spec get_metadata(riak_object()) -> dict() %% @doc Assert that this riak_object has no siblings and return its associated %% metadata. This function will fail with a badmatch error if the %% object has siblings (value_count() > 1). -get_metadata(O=#r_object{}) -> +-spec get_metadata(riak_object()) -> dict(). +get_metadata(#r_object{contents=Contents}) -> % this blows up intentionally (badmatch) if more than one content value! - [{Metadata,_V,_C}] = get_contents(O), - Metadata. + 1 = compactdvv:value_count(Contents), + V = compactdvv:get_last_value(Contents), + V#r_content.metadata. 
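
get_vclock/2 above is the pivot of the new representation: with contents it
returns the whole clock, [{Id, Counter, Values}]; without, it asks
compactdvv:join/1 for the bare causal history, which is what gets shipped in
headers, stats and tombstones. On that shape, join amounts to the following
(join_sketch is a sketch of the idea, not the compactdvv code):

-module(join_sketch).
-export([join/1]).

%% Drop the stored values, keeping only the {Id, Counter} history.
join(Clock) ->
    [{Id, Counter} || {Id, Counter, _Values} <- Clock].
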
-%% @spec get_metadatas(riak_object()) -> [dict()] %% @doc Return a list of the metadata values for this riak_object. -get_metadatas(#r_object{contents=Contents}) -> - [Content#r_content.metadata || Content <- Contents]. +-spec get_metadatas(riak_object()) -> [dict()]. +get_metadatas(#r_object{contents=C}) -> + [V#r_content.metadata || V <- compactdvv:get_values(C)]. -%% @spec get_values(riak_object()) -> [value()] %% @doc Return a list of object values for this riak_object. -get_values(#r_object{contents=C}) -> [Content#r_content.value || Content <- C]. +-spec get_values(riak_object()) -> [value()]. +get_values(#r_object{contents=C}) -> + [V#r_content.value || V <- compactdvv:get_values(C)]. -%% @spec get_value(riak_object()) -> value() %% @doc Assert that this riak_object has no siblings and return its associated %% value. This function will fail with a badmatch error if the object %% has siblings (value_count() > 1). -get_value(Object=#r_object{}) -> +-spec get_value(riak_object()) -> value(). +get_value(#r_object{contents=C}) -> % this blows up intentionally (badmatch) if more than one content value! - [{_M,Value,_C}] = get_contents(Object), - Value. + 1 = compactdvv:value_count(C), + V = compactdvv:get_last_value(C), + V#r_content.value. -%% @spec update_metadata(riak_object(), dict()) -> riak_object() %% @doc Set the updated metadata of an object to M. +-spec update_metadata(riak_object(), dict()) -> riak_object(). update_metadata(Object=#r_object{}, M) -> Object#r_object{updatemetadata=dict:erase(clean, M)}. -%% @spec update_value(riak_object(), value()) -> riak_object() %% @doc Set the updated value of an object to V +-spec update_value(riak_object(), value()) -> riak_object(). update_value(Object=#r_object{}, V) -> Object#r_object{updatevalue=V}. -%% @spec get_update_metadata(riak_object()) -> dict() %% @doc Return the updated metadata of this riak_object. +-spec get_update_metadata(riak_object()) -> dict(). get_update_metadata(#r_object{updatemetadata=UM}) -> UM. -%% @spec get_update_value(riak_object()) -> value() %% @doc Return the updated value of this riak_object. +-spec get_update_value(riak_object()) -> value(). get_update_value(#r_object{updatevalue=UV}) -> UV. -%% @spec set_vclock(riak_object(), vclock:vclock()) -> riak_object() %% @doc INTERNAL USE ONLY. Set the vclock of riak_object O to V. +-spec set_vclock(riak_object(), compactdvv:clock()) -> riak_object(). set_vclock(Object=#r_object{contents=Contents}, Clock) -> - [C|_] = Contents, - C2 = C#r_content{dvvclock = Clock}, - Object#r_object{contents=[C2]}. + 1 = compactdvv:value_count(Contents), + V = compactdvv:get_last_value(Contents), + C = compactdvv:set_value(Clock,V), + Object#r_object{contents=C}. -%% @doc Increment the entry for Id in O's vclock (ignore timestamp since we are not pruning). --spec increment_vclock(riak_object(), dottedvv:id(), dottedvv:timestamp()) -> riak_object(). -increment_vclock(Object, Id, _Timestamp) -> increment_vclock(Object, Id). +%% @doc INTERNAL USE ONLY. Set the contents of riak_object +%% to the Contents. Normal clients should use the +%% set_update_[value|metadata]() + apply_updates() method for changing +%% object contents. +-spec set_contents(riak_object(), riak_content()) -> riak_object(). +set_contents(Object=#r_object{}, Contents) -> + Object#r_object{contents=Contents}. %% @doc Increment the entry for Id in O's vclock. --spec increment_vclock(riak_object(), dottedvv:id()) -> riak_object(). 
-increment_vclock(Object=#r_object{}, Id) -> - Dvv = dottedvv:increment(Id, vclock(Object)), - riak_object:set_vclock(Object, Dvv). +-spec increment_vclock(riak_object(), any(), non_neg_integer()) -> riak_object(). +increment_vclock(Object=#r_object{}, Id, _TS) -> + increment_vclock(Object, Id). + +%% @doc Increment the entry for Id in O's vclock (ignore timestamp since we are not pruning). +-spec increment_vclock(riak_object(), any()) -> riak_object(). +increment_vclock(Object=#r_object{contents=Conts}, Id) -> + Value = #r_content{} = compactdvv:get_last_value(Conts), + C = compactdvv:syncupdate(Conts, Id, Value), + set_contents(Object, C). + +-spec update_vclock(riak_object(), riak_object(), any()) -> riak_object(). +update_vclock(ObjectC=#r_object{contents=ContC}, #r_object{contents=ContR}, Id) -> + Value = #r_content{} = compactdvv:get_last_value(ContC), + C = compactdvv:syncupdate(ContC, ContR, Id, Value), + set_contents(ObjectC, C). + +-spec update_vclock(riak_object(), riak_object(), any(), non_neg_integer()) -> riak_object(). +update_vclock(ObjectC=#r_object{}, ObjectR=#r_object{}, Id, _TS) -> + increment_vclock(ObjectC, ObjectR, Id). + --spec update_vclock(riak_object(), riak_object(), dottedvv:id()) -> riak_object(). -update_vclock(ObjectC=#r_object{}, ObjectR=#r_object{}, Id) -> - Dvv = dottedvv:update(vclock(ObjectC), vclock(ObjectR), Id), - riak_object:set_vclock(ObjectC,Dvv). %% @doc Prepare a list of index specifications %% to pass to the backend. This function is for @@ -388,26 +421,36 @@ index_data(Obj) -> assemble_index_specs(Indexes, IndexOp) -> [{IndexOp, Index, Value} || {Index, Value} <- Indexes]. -%% @spec set_contents(riak_object(), [{dict(), value()}]) -> riak_object() -%% @doc INTERNAL USE ONLY. Set the contents of riak_object to the -%% {Metadata, Value, Clock} pairs in MVs. Normal clients should use the -%% set_update_[value|metadata]() + apply_updates() method for changing -%% object contents. -set_contents(Object=#r_object{}, MVs) when is_list(MVs) -> - Object#r_object{contents=[#r_content{metadata=M, value=V, dvvclock=C} || {M, V, C} <- MVs]}. -%% @spec to_json(riak_object()) -> {struct, list(any())} %% @doc Converts a riak_object into its JSON equivalent +-spec to_json(riak_object()) -> {struct, list(any())}. to_json(Obj=#r_object{}) -> - {struct, [{<<"bucket">>, riak_object:bucket(Obj)}, - {<<"key">>, riak_object:key(Obj)}, - {<<"values">>, - [{struct, - [{<<"metadata">>, jsonify_metadata(MD)}, - {<<"data">>, V}, - {<<"vclock">>, riak_kv_wm_utils:encode_vclock(C)}]} - || {MD, V, C} <- riak_object:get_contents(Obj) - ]}]}. + {struct, + [ + {<<"bucket">>, bucket(Obj)}, + {<<"key">>, key(Obj)}, + {<<"contents">>, + [ + {struct, + [ + {<<"id">>, riak_kv_wm_utils:encode_vclock(Id)}, + {<<"counter">>, riak_kv_wm_utils:encode_vclock(Counter)}, + {<<"values">>, + [ + {struct, + [ + {<<"metadata">>, jsonify_metadata(Rcont#r_content.metadata)}, + {<<"value">>, Rcont#r_content.value} + ] + } || Rcont <- Values + ] + } + ] + } || {Id, Counter, Values} <- get_vclock(Obj,true) + ] + } + ] + }. -spec from_json(any()) -> riak_object(). from_json({struct, Obj}) -> @@ -415,9 +458,9 @@ from_json({struct, Obj}) -> from_json(Obj) -> Bucket = proplists:get_value(<<"bucket">>, Obj), Key = proplists:get_value(<<"key">>, Obj), - [{struct, Values}] = proplists:get_value(<<"values">>, Obj), - RObj0 = riak_object:new(Bucket, Key, <<"">>), - riak_object:set_contents(RObj0, dejsonify_values(Values, [])). 
+ Contents = proplists:get_value(<<"contents">>, Obj), + RObj0 = new(Bucket, Key, <<"">>), + set_contents(RObj0, dejsonify_contents(Contents, [])). jsonify_metadata(MD) -> MDJS = fun({LastMod, Now={_,_,_}}) -> @@ -476,10 +519,23 @@ jsonify_proplist(List) -> end end, dict:new(), List)). + +dejsonify_contents([], Accum) -> + lists:reverse(Accum); +dejsonify_contents([{struct, [ + {<<"id">>, Id}, + {<<"counter">>, Counter}, + {<<"values">>, Values}]} | T], Accum) -> + V = dejsonify_values(Values), + dejsonify_contents(T, [{Id,Counter,V}|Accum]). + + +dejsonify_values(V) -> dejsonify_values(V,[]). dejsonify_values([], Accum) -> lists:reverse(Accum); -dejsonify_values([{<<"metadata">>, {struct, MD0}}, - {<<"data">>, D}, {<<"vclock">>, C} | T], Accum) -> +dejsonify_values([{struct, [ + {<<"metadata">>, {struct, MD0}}, + {<<"value">>, V}]} | T], Accum) -> Converter = fun({Key, Val}) -> case Key of <<"Links">> -> @@ -496,8 +552,7 @@ dejsonify_values([{<<"metadata">>, {struct, MD0}}, end end, MD = dict:from_list([Converter(KV) || KV <- MD0]), - Clock = binary_to_term(zlib:unzip(base64:decode(C))), - dejsonify_values(T, [{MD, D, Clock}|Accum]). + dejsonify_values(T, [#r_content{metadata=MD, value=V}|Accum]). %% @doc convert structs back into proplists dejsonify_meta_value({struct, PList}) -> @@ -509,34 +564,8 @@ dejsonify_meta_value({struct, PList}) -> [{Key, dejsonify_meta_value(V)}|Acc] end, [], PList); dejsonify_meta_value(Value) -> Value. - - -is_updated(_Object=#r_object{updatemetadata=M,updatevalue=V}) -> - case dict:find(clean, M) of - error -> true; - {ok,_} -> - case V of - undefined -> false; - _ -> true - end - end. - -syntactic_merge(CurrentObject, NewObject) -> - %% Paranoia in case objects were incorrectly stored - %% with update information. Vclock is not updated - %% but since no data is lost the objects will be - %% fixed if rewritten. - UpdatedNew = case is_updated(NewObject) of - true -> apply_updates(NewObject); - false -> NewObject - end, - UpdatedCurr = case is_updated(CurrentObject) of - true -> apply_updates(CurrentObject); - false -> CurrentObject - end, - reconcile(UpdatedCurr, UpdatedNew, true). -ifdef(TEST). @@ -580,7 +609,7 @@ merge2_test() -> O1 = riak_object:increment_vclock(object_test(), node1), O2 = riak_object:increment_vclock(riak_object:new(B,K,V), node2), O3 = riak_object:syntactic_merge(O1, O2), - [node1, node2] = lists:sort([N || {_,{N,_}} <- riak_object:vclock(O3)]), + [node1, node2] = lists:sort([N || {_,{N,_}} <- riak_object:get_vclock(O3,false)]), 2 = riak_object:value_count(O3). merge3_test() -> @@ -763,7 +792,7 @@ jsonify_round_trip_test() -> O2 = from_json(to_json(O)), ?assertEqual(bucket(O), bucket(O2)), ?assertEqual(key(O), key(O2)), - ?assert(dottedvv:equal(vclock(O), vclock(O2))), + ?assert(compactdvv:equal(get_vclock(O,true), get_vclock(O2,true))), ?assertEqual(lists:sort(Meta), lists:sort(dict:fetch(?MD_USERMETA, get_metadata(O2)))), ?assertEqual(Links, dict:fetch(?MD_LINKS, get_metadata(O2))), ?assertEqual(lists:sort(Indexes), lists:sort(index_data(O2))), From bfd1c04ea1765fabe173d72b4d2aa116125b7f84 Mon Sep 17 00:00:00 2001 From: ricardobcl Date: Thu, 3 Jan 2013 12:35:32 +0000 Subject: [PATCH 14/25] Updated rebar to use the correct branch of riak_core. --- rebar.config | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rebar.config b/rebar.config index 9d1211000d..c8b0967129 100644 --- a/rebar.config +++ b/rebar.config @@ -9,7 +9,7 @@ ]}. 
{deps, [ - {riak_core, ".*", {git, "git://github.com/ricardobcl/riak_core", "master"}}, + {riak_core, ".*", {git, "git://github.com/ricardobcl/riak_core", "compactdvv"}}, {erlang_js, ".*", {git, "git://github.com/basho/erlang_js", "master"}}, {bitcask, ".*", {git, "git://github.com/basho/bitcask", "master"}}, {merge_index, ".*", {git, "git://github.com/basho/merge_index", From ddcea0bea3a1270fca01164ce08a1250e1f9b035 Mon Sep 17 00:00:00 2001 From: ricardobcl Date: Fri, 4 Jan 2013 20:53:35 +0000 Subject: [PATCH 15/25] Corrected get_contents/1, obj_not_deleted/1 and simplified hash_object/1. --- src/riak_kv_index_hashtree.erl | 6 +----- src/riak_kv_util.erl | 2 +- src/riak_object.erl | 9 ++++++--- 3 files changed, 8 insertions(+), 9 deletions(-) diff --git a/src/riak_kv_index_hashtree.erl b/src/riak_kv_index_hashtree.erl index a7b9ab59c7..b5e4915743 100644 --- a/src/riak_kv_index_hashtree.erl +++ b/src/riak_kv_index_hashtree.erl @@ -358,11 +358,7 @@ load_built(#state{trees=Trees}) -> %% Generate a hash value for a binary-encoded `riak_object' -spec hash_object(riak_object_t2b()) -> binary(). hash_object(RObjBin) -> - %% Normalize the `riak_object' vector clock before hashing - RObj = binary_to_term(RObjBin), - Vclock = riak_object:get_vclock(RObj,true), - UpdObj = riak_object:set_contents(RObj, lists:sort(Vclock)), - Hash = erlang:phash2(term_to_binary(UpdObj)), + Hash = erlang:phash2(term_to_binary(RObjBin)), term_to_binary(Hash). %% Fold over a given vnode's data, inserting each object into the appropriate diff --git a/src/riak_kv_util.erl b/src/riak_kv_util.erl index d4b4f9dfbc..107017b246 100644 --- a/src/riak_kv_util.erl +++ b/src/riak_kv_util.erl @@ -68,7 +68,7 @@ is_x_deleted(Obj) -> %% deleted. Return is the atom 'undefined' if all contents %% are marked deleted, or the input Obj if any of them are not. obj_not_deleted(Obj) -> - case [{M, V, C} || {M, V, C} <- riak_object:get_contents(Obj), + case [{M, V} || {M, V} <- riak_object:get_contents(Obj), dict:is_key(<<"X-Riak-Deleted">>, M) =:= false] of [] -> undefined; _ -> Obj diff --git a/src/riak_object.erl b/src/riak_object.erl index 241177e618..c785f926bc 100644 --- a/src/riak_object.erl +++ b/src/riak_object.erl @@ -35,7 +35,7 @@ -type value() :: term(). -record(r_content, { - metadata :: dict(), + metadata :: dict(), value :: term() }). @@ -43,7 +43,7 @@ -record(r_object, { bucket :: bucket(), key :: key(), - contents :: compactdvv:clock(), % [{id, count, [r_content]}] + contents :: compactdvv:clock(), % [{id, count, [riak_content()]}] updatemetadata=dict:store(clean, true, dict:new()) :: dict(), updatevalue :: term() }). @@ -278,7 +278,10 @@ value_count(#r_object{contents=Contents}) -> compactdvv:value_count(Contents). %% @doc Return the contents (a list of {metadata, value} tuples) for %% this riak_object. -spec get_contents(riak_object()) -> [riak_content()]. -get_contents(#r_object{contents=Contents}) -> compactdvv:get_values(Contents). +get_contents(#r_object{contents=Contents}) -> + Rconts = compactdvv:get_values(Contents), + [{C#r_content.metadata, C#r_content.value} || + C <- Rconts]. %% @doc Assert that this riak_object has no siblings and return its associated %% metadata. This function will fail with a badmatch error if the From e1492fe5a1efab445705038c26ffda4e5d707eb4 Mon Sep 17 00:00:00 2001 From: ricardobcl Date: Wed, 9 Jan 2013 20:50:48 +0000 Subject: [PATCH 16/25] Corrected a few bugs and unit tests now all pass. 
--- src/riak_object.erl | 107 ++++++++++++++++++++++++-------------------- 1 file changed, 59 insertions(+), 48 deletions(-) diff --git a/src/riak_object.erl b/src/riak_object.erl index c785f926bc..a0df63a4d8 100644 --- a/src/riak_object.erl +++ b/src/riak_object.erl @@ -138,11 +138,11 @@ equal2(Obj1,Obj2) -> equal_contents([],[]) -> true; equal_contents(_,[]) -> false; equal_contents([],_) -> false; -equal_contents([C1|R1],[C2|R2]) -> - MD1 = lists:keysort(1, dict:to_list(C1#r_content.metadata)), - MD2 = lists:keysort(1, dict:to_list(C2#r_content.metadata)), +equal_contents([{M1,V1}|R1],[{M2,V2}|R2]) -> + MD1 = lists:keysort(1, dict:to_list(M1)), + MD2 = lists:keysort(1, dict:to_list(M2)), (MD1 =:= MD2) - andalso (C1#r_content.value =:= C2#r_content.value) + andalso (V1 =:= V2) andalso equal_contents(R1,R2). @@ -156,8 +156,15 @@ equal_contents([C1|R1],[C2|R2]) -> % X-Riak-Last-Modified header. -spec reconcile([riak_object()], boolean()) -> riak_object(). reconcile(RObjs, AllowMultiple) -> - AllContents = [O#r_object.contents || O <- RObjs], - SyncedContents = compactdvv:sync(AllContents), + %% filter clocks with null as id + AllContents = + [lists:filter(fun ({I,_,_}) -> I =/= null end, O#r_object.contents) || O <- RObjs], + AllContents2 = + case AllContents of + [] -> [O#r_object.contents || O <- RObjs]; + _ -> AllContents + end, + SyncedContents = compactdvv:sync(AllContents2), Contents = case AllowMultiple of false -> compactdvv:lww(SyncedContents); true -> SyncedContents @@ -187,20 +194,16 @@ most_recent_content(Contents) -> compactdvv:lww(fun riak_object:compare_content_dates/2, Contents). -spec compare_content_dates(riak_content(), riak_content()) -> boolean(). -compare_content_dates(C1, C2) -> - compare_metadata(C1#r_content.metadata,C2#r_content.metadata). - --spec compare_metadata(dict(), dict()) -> boolean(). -compare_metadata(MD1,MD2) -> - D1 = dict:fetch(<<"X-Riak-Last-Modified">>, MD1), - D2 = dict:fetch(<<"X-Riak-Last-Modified">>, MD2), +compare_content_dates(C1,C2) -> + D1 = dict:fetch(<<"X-Riak-Last-Modified">>, C1#r_content.metadata), + D2 = dict:fetch(<<"X-Riak-Last-Modified">>, C2#r_content.metadata), %% true if C1 was modifed later than C2 Cmp1 = riak_core_util:compare_dates(D1, D2), %% true if C2 was modifed later than C1 Cmp2 = riak_core_util:compare_dates(D2, D1), %% check for deleted objects - Del1 = dict:is_key(<<"X-Riak-Deleted">>, MD1), - Del2 = dict:is_key(<<"X-Riak-Deleted">>, MD2), + Del1 = dict:is_key(<<"X-Riak-Deleted">>, C1#r_content.metadata), + Del2 = dict:is_key(<<"X-Riak-Deleted">>, C2#r_content.metadata), SameDate = (Cmp1 =:= Cmp2), case {SameDate, Del1, Del2} of @@ -213,7 +216,7 @@ compare_metadata(MD1,MD2) -> _ -> %% Dates equal and either both present or both deleted, compare %% by opaque contents. - MD1 < MD2 + C1 < C2 end. @@ -240,20 +243,30 @@ update(Obj) -> % update_metadata() calls) to this riak_object. -spec apply_updates(riak_object()) -> riak_object(). 
apply_updates(Object=#r_object{}) -> - C = case Object#r_object.updatevalue of - undefined -> - get_vclock(Object,true); - _ -> - MD = hd(get_metadatas(Object)), - VL = Object#r_object.updatevalue, - Rcont = #r_content{metadata=MD,value=VL}, - compactdvv:set_value(get_vclock(Object,false),Rcont) - end, - case dict:find(clean, Object#r_object.updatemetadata) of - error -> - [dict:erase(clean,Object#r_object.updatemetadata) || _X <- lists:seq(1,compactdvv:value_count(C))] - end, - Object#r_object{contents=C, + CurrentContents = get_vclock(Object,true), + UpdatedContents = + case Object#r_object.updatevalue of + undefined -> + case dict:find(clean, Object#r_object.updatemetadata) of + {ok,_} -> CurrentContents; %% no changes in values or metadata + error -> + NewMD = dict:erase(clean,Object#r_object.updatemetadata), + compactdvv:map_values( + fun (R) -> #r_content{metadata=NewMD, value=R#r_content.value} end, + CurrentContents) + end; + _ -> + MD = case dict:find(clean, Object#r_object.updatemetadata) of + {ok,_} -> + hd(get_metadatas(Object)); + error -> + dict:erase(clean,Object#r_object.updatemetadata) + end, + VL = Object#r_object.updatevalue, + NewR = #r_content{metadata=MD,value=VL}, + compactdvv:set_value(CurrentContents,NewR) + end, + Object#r_object{contents=UpdatedContents, updatemetadata=dict:store(clean, true, dict:new()), updatevalue=undefined}. @@ -354,8 +367,12 @@ increment_vclock(Object=#r_object{}, Id, _TS) -> %% @doc Increment the entry for Id in O's vclock (ignore timestamp since we are not pruning). -spec increment_vclock(riak_object(), any()) -> riak_object(). increment_vclock(Object=#r_object{contents=Conts}, Id) -> - Value = #r_content{} = compactdvv:get_last_value(Conts), - C = compactdvv:syncupdate(Conts, Id, Value), + C = case Conts of + [{null,0,[V]}] -> compactdvv:syncupdate([], Id, V); + _ -> + Value = #r_content{} = compactdvv:get_last_value(Conts), + compactdvv:syncupdate(Conts, Id, Value) + end, set_contents(Object, C). -spec update_vclock(riak_object(), riak_object(), any()) -> riak_object(). @@ -530,7 +547,9 @@ dejsonify_contents([{struct, [ {<<"counter">>, Counter}, {<<"values">>, Values}]} | T], Accum) -> V = dejsonify_values(Values), - dejsonify_contents(T, [{Id,Counter,V}|Accum]). + Id2 = binary_to_term(zlib:unzip(base64:decode(Id))), + Counter2 = binary_to_term(zlib:unzip(base64:decode(Counter))), + dejsonify_contents(T, [{Id2,Counter2,V}|Accum]). dejsonify_values(V) -> dejsonify_values(V,[]). @@ -596,8 +615,8 @@ update_test() -> reconcile_test() -> {O,O2} = update_test(), O3 = riak_object:increment_vclock(O2,self()), - O3 = riak_object:reconcile(O,O3,true), - O3 = riak_object:reconcile(O,O3,false), + O3 = riak_object:reconcile([O,O3],true), + O3 = riak_object:reconcile([O,O3],false), {O,O3}. merge1_test() -> @@ -612,7 +631,7 @@ merge2_test() -> O1 = riak_object:increment_vclock(object_test(), node1), O2 = riak_object:increment_vclock(riak_object:new(B,K,V), node2), O3 = riak_object:syntactic_merge(O1, O2), - [node1, node2] = lists:sort([N || {_,{N,_}} <- riak_object:get_vclock(O3,false)]), + [node1, node2] = lists:sort([N || {N,_,_} <- riak_object:get_vclock(O3,false)]), 2 = riak_object:value_count(O3). merge3_test() -> @@ -659,7 +678,7 @@ inequality_value_test() -> inequality_multivalue_test() -> O1 = riak_object:new(<<"test">>, <<"a">>, "value"), - [C] = riak_object:get_contents(O1), + [C] = riak_object:get_vclock(O1,true), O1p = riak_object:set_contents(O1, [C,C]), false = riak_object:equal(O1, O1p), false = riak_object:equal(O1p, O1). 
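
Several of the fixes above deal with the anonymous clock a brand-new object
carries: until a coordinator assigns it a real id, its contents are
[{null, 0, [Value]}]. reconcile/2 and increment_vclock/2 therefore drop null
entries whenever a named entry exists, and keep them only when nothing named
has been written yet. That filter in isolation (nullid_sketch is a model,
assuming the same [{Id, Counter, Values}] shape):

-module(nullid_sketch).
-export([drop_anonymous/1]).

drop_anonymous(Clock) ->
    case [E || {Id, _, _} = E <- Clock, Id =/= null] of
        []    -> Clock;   %% nothing named yet: keep the anonymous entry
        Named -> Named
    end.
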
@@ -727,7 +746,7 @@ date_reconcile_test() -> httpd_util:rfc1123_date( calendar:gregorian_seconds_to_datetime(D+1)), get_metadata(O3)))), - O5 = riak_object:reconcile(O2,O4, false), + O5 = riak_object:reconcile([O2,O4], false), false = riak_object:equal(O2, O5), false = riak_object:equal(O4, O5). @@ -757,14 +776,6 @@ is_updated_test() -> OVu = riak_object:update_value(O, testupdate), ?assert(is_updated(OVu)). -remove_duplicates_test() -> - O0 = riak_object:new(<<"test">>, <<"test">>, zero), - O1 = riak_object:new(<<"test">>, <<"test">>, one), - ND = remove_duplicate_objects([O0, O0, O1, O1, O0, O1]), - ?assertEqual(2, length(ND)), - ?assert(lists:member(O0, ND)), - ?assert(lists:member(O1, ND)). - new_with_ctype_test() -> O = riak_object:new(<<"b">>, <<"k">>, <<"{\"a\":1}">>, "application/json"), ?assertEqual("application/json", dict:fetch(?MD_CTYPE, riak_object:get_metadata(O))). @@ -816,8 +827,8 @@ check_most_recent({V1, T1, D1}, {V2, T2, D2}) -> ?assertEqual(C3, C4), - C3#r_content.value. - + (compactdvv:get_last_value(C3))#r_content.value. + determinstic_most_recent_test() -> D = calendar:datetime_to_gregorian_seconds( From e8e9ecea29eba807b47bef30fabd3e4e1dcf385e Mon Sep 17 00:00:00 2001 From: ricardobcl Date: Sat, 12 Jan 2013 00:20:04 +0000 Subject: [PATCH 17/25] Corrected some bugs, namely set_vclock/2, reconcile/2, update_vclock/3 and increment_vclock/2. --- src/riak_kv_delete.erl | 2 +- src/riak_kv_index_hashtree.erl | 2 +- src/riak_kv_vnode.erl | 6 +-- src/riak_object.erl | 90 ++++++++++++++-------------------- 4 files changed, 43 insertions(+), 57 deletions(-) diff --git a/src/riak_kv_delete.erl b/src/riak_kv_delete.erl index 9ca13c223b..d060cc2dc6 100644 --- a/src/riak_kv_delete.erl +++ b/src/riak_kv_delete.erl @@ -66,7 +66,7 @@ delete(ReqId,Bucket,Key,Options,Timeout,Client,ClientId,undefined) -> case C:get(Bucket,Key,[{r,R},{pr,PR},{timeout,Timeout}]) of {ok, OrigObj} -> RemainingTime = Timeout - (riak_core_util:moment() - RealStartTime), - delete(ReqId,Bucket,Key,Options,RemainingTime,Client,ClientId,riak_object:get_vclocks(OrigObj, false)); + delete(ReqId,Bucket,Key,Options,RemainingTime,Client,ClientId,riak_object:get_vclock(OrigObj, false)); {error, notfound} -> ?DTRACE(?C_DELETE_INIT1, [-2], []), Client ! {ReqId, {error, notfound}}; diff --git a/src/riak_kv_index_hashtree.erl b/src/riak_kv_index_hashtree.erl index b5e4915743..f3a8f128e4 100644 --- a/src/riak_kv_index_hashtree.erl +++ b/src/riak_kv_index_hashtree.erl @@ -358,7 +358,7 @@ load_built(#state{trees=Trees}) -> %% Generate a hash value for a binary-encoded `riak_object' -spec hash_object(riak_object_t2b()) -> binary(). hash_object(RObjBin) -> - Hash = erlang:phash2(term_to_binary(RObjBin)), + Hash = erlang:phash2(RObjBin), term_to_binary(Hash). 
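
The hash_object/1 simplification above is safe because the argument is
already a term_to_binary-encoded object: wrapping it in a second
term_to_binary changed the hash value but added nothing to its determinism,
and anti-entropy only needs a stable hash. Side by side (hash_sketch is
illustrative):

-module(hash_sketch).
-export([old_hash/1, new_hash/1]).

%% Both are deterministic in the input binary; the new form skips
%% re-encoding an already-encoded term.
old_hash(RObjBin) when is_binary(RObjBin) ->
    term_to_binary(erlang:phash2(term_to_binary(RObjBin))).

new_hash(RObjBin) when is_binary(RObjBin) ->
    term_to_binary(erlang:phash2(RObjBin)).
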
%% Fold over a given vnode's data, inserting each object into the appropriate diff --git a/src/riak_kv_vnode.erl b/src/riak_kv_vnode.erl index b3a18baed1..a15bc124a1 100644 --- a/src/riak_kv_vnode.erl +++ b/src/riak_kv_vnode.erl @@ -890,11 +890,11 @@ put_merge(false, false, CurObj, UpdObj, _VId, _StartTime) -> % coord=false, LWW= false -> {newobj, ResObj} end; -put_merge(true, true, _CurObj, UpdObj, VId, StartTime) -> % coord=false, LWW=true +put_merge(true, true, _CurObj, UpdObj, VId, StartTime) -> % coord=true, LWW=true {newobj, riak_object:increment_vclock(UpdObj, VId, StartTime)}; put_merge(true, false, CurObj, UpdObj, VId, StartTime) -> - CurObj1 = riak_object:update(CurObj), - UpdObj1 = riak_object:update(UpdObj), + CurObj1 = riak_object:apply_updates(CurObj), + UpdObj1 = riak_object:apply_updates(UpdObj), ResObj = riak_object:update_vclock(UpdObj1, CurObj1, VId, StartTime), {newobj, ResObj}. diff --git a/src/riak_object.erl b/src/riak_object.erl index a0df63a4d8..89a9f4b6ff 100644 --- a/src/riak_object.erl +++ b/src/riak_object.erl @@ -55,6 +55,7 @@ -type index_value() :: integer() | binary(). -define(MAX_KEY_SIZE, 65536). +-define(NULLID,null). -export([new/3, new/4, ensure_robject/1, equal/2, new_vclock/0, equal_vclock/2]). -export([increment_vclock/2, increment_vclock/3, update_vclock/4, update_vclock/3]). @@ -89,18 +90,18 @@ new(B, K, V, MD) when is_binary(B), is_binary(K) -> false -> case MD of no_initial_metadata -> - Contents = compactdvv:new([#r_content{metadata=dict:new(), value=V}]), + Contents = compactdvv:new(?NULLID,[#r_content{metadata=dict:new(), value=V}]), #r_object{bucket=B,key=K, contents=Contents}; _ -> - Contents = compactdvv:new([#r_content{metadata=MD, value=V}]), + Contents = compactdvv:new(?NULLID,[#r_content{metadata=MD, value=V}]), #r_object{bucket=B,key=K,updatemetadata=MD, contents=Contents} end end. -spec new_vclock() -> compactdvv:clock(). -new_vclock() -> compactdvv:new(). +new_vclock() -> compactdvv:new(?NULLID,[]). %% Ensure the incoming term is a riak_object. -spec ensure_robject(any()) -> riak_object(). @@ -156,11 +157,11 @@ equal_contents([{M1,V1}|R1],[{M2,V2}|R2]) -> % X-Riak-Last-Modified header. -spec reconcile([riak_object()], boolean()) -> riak_object(). reconcile(RObjs, AllowMultiple) -> - %% filter clocks with null as id + %% filter clocks with null as id, except if there aren't any non-null ids AllContents = - [lists:filter(fun ({I,_,_}) -> I =/= null end, O#r_object.contents) || O <- RObjs], + [lists:filter(fun ({I,_,_}) -> I =/= ?NULLID end, O#r_object.contents) || O <- RObjs], AllContents2 = - case AllContents of + case lists:append(AllContents) of [] -> [O#r_object.contents || O <- RObjs]; _ -> AllContents end, @@ -180,8 +181,8 @@ syntactic_merge(CurrentObj, NewObj) -> %% with update information. Vclock is not updated %% but since no data is lost the objects will be %% fixed if rewritten. - UCurr = update(CurrentObj), - UNew = update(NewObj), + UCurr = apply_updates(CurrentObj), + UNew = apply_updates(NewObj), reconcile([UNew, UCurr], true). @@ -220,25 +221,6 @@ compare_content_dates(C1,C2) -> end. --spec is_updated(riak_object()) -> boolean(). -is_updated(_Object=#r_object{updatemetadata=M,updatevalue=V}) -> - case dict:find(clean, M) of - error -> true; - {ok,_} -> - case V of - undefined -> false; - _ -> true - end - end. - --spec update(riak_object()) -> riak_object(). -update(Obj) -> - case is_updated(Obj) of - true -> apply_updates(Obj); - false -> Obj - end. 
- - % @doc Promote pending updates (made with the update_value() and % update_metadata() calls) to this riak_object. -spec apply_updates(riak_object()) -> riak_object(). @@ -346,9 +328,12 @@ get_update_value(#r_object{updatevalue=UV}) -> UV. %% @doc INTERNAL USE ONLY. Set the vclock of riak_object O to V. -spec set_vclock(riak_object(), compactdvv:clock()) -> riak_object(). set_vclock(Object=#r_object{contents=Contents}, Clock) -> - 1 = compactdvv:value_count(Contents), - V = compactdvv:get_last_value(Contents), - C = compactdvv:set_value(Clock,V), + C = case Object#r_object.updatevalue of + undefined -> + V = compactdvv:get_last_value(Contents), + compactdvv:set_value(Clock,V); + _ -> compactdvv:set_value(Clock,Object#r_object.updatevalue) + end, Object#r_object{contents=C}. %% @doc INTERNAL USE ONLY. Set the contents of riak_object @@ -367,23 +352,21 @@ increment_vclock(Object=#r_object{}, Id, _TS) -> %% @doc Increment the entry for Id in O's vclock (ignore timestamp since we are not pruning). -spec increment_vclock(riak_object(), any()) -> riak_object(). increment_vclock(Object=#r_object{contents=Conts}, Id) -> - C = case Conts of - [{null,0,[V]}] -> compactdvv:syncupdate([], Id, V); - _ -> - Value = #r_content{} = compactdvv:get_last_value(Conts), - compactdvv:syncupdate(Conts, Id, Value) - end, - set_contents(Object, C). + Conts2 = lists:filter(fun ({I,_,_}) -> I =/= ?NULLID end, Conts), + Value = compactdvv:get_last_value(Conts), + NewConts = compactdvv:syncupdate(Conts2, Id, Value), + set_contents(Object, NewConts). -spec update_vclock(riak_object(), riak_object(), any()) -> riak_object(). update_vclock(ObjectC=#r_object{contents=ContC}, #r_object{contents=ContR}, Id) -> - Value = #r_content{} = compactdvv:get_last_value(ContC), - C = compactdvv:syncupdate(ContC, ContR, Id, Value), - set_contents(ObjectC, C). + ContC2 = lists:filter(fun ({I,_,_}) -> I =/= ?NULLID end, ContC), + Value = compactdvv:get_last_value(ContC), + NewConts = compactdvv:syncupdate(ContC2, ContR, Id, Value), + set_contents(ObjectC, NewConts). -spec update_vclock(riak_object(), riak_object(), any(), non_neg_integer()) -> riak_object(). update_vclock(ObjectC=#r_object{}, ObjectR=#r_object{}, Id, _TS) -> - increment_vclock(ObjectC, ObjectR, Id). + update_vclock(ObjectC, ObjectR, Id). @@ -766,16 +749,6 @@ get_update_metadata_test() -> riak_object:get_update_metadata( riak_object:update_metadata(O, NewMD))). -is_updated_test() -> - O = riak_object:new(<<"test">>, <<"test">>, test), - ?assertNot(is_updated(O)), - OMu = riak_object:update_metadata( - O, dict:store(<<"X-Test-Update">>, "testupdate", - riak_object:get_metadata(O))), - ?assert(is_updated(OMu)), - OVu = riak_object:update_value(O, testupdate), - ?assert(is_updated(OVu)). - new_with_ctype_test() -> O = riak_object:new(<<"b">>, <<"k">>, <<"{\"a\":1}">>, "application/json"), ?assertEqual("application/json", dict:fetch(?MD_CTYPE, riak_object:get_metadata(O))). @@ -810,7 +783,20 @@ jsonify_round_trip_test() -> ?assertEqual(lists:sort(Meta), lists:sort(dict:fetch(?MD_USERMETA, get_metadata(O2)))), ?assertEqual(Links, dict:fetch(?MD_LINKS, get_metadata(O2))), ?assertEqual(lists:sort(Indexes), lists:sort(index_data(O2))), - ?assertEqual(get_contents(O), get_contents(O2)). 
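
The update_vclock/4 fix above restores the intended coordinator flow for
non-LWW puts: apply any pending updates on both objects, then advance the
clock at the coordinating vnode against the replica's history. Modelled on
plain version vectors rather than dotted clocks (coord_sketch is an
illustration; compactdvv:syncupdate/4 does the real work):

-module(coord_sketch).
-export([coord_update/3]).

counter(Id, VV) ->
    case lists:keyfind(Id, 1, VV) of
        {Id, C} -> C;
        false   -> 0
    end.

%% Pointwise maximum of two version vectors.
merge(A, B) ->
    Ids = lists:usort([Id || {Id, _} <- A ++ B]),
    [{Id, max(counter(Id, A), counter(Id, B))} || Id <- Ids].

%% Record one new event at the given replica id.
event(Id, VV) ->
    lists:keystore(Id, 1, VV, {Id, counter(Id, VV) + 1}).

coord_update(ClientVV, ReplicaVV, CoordId) ->
    event(CoordId, merge(ClientVV, ReplicaVV)).

For example, coord_update([{c,1}], [{a,2}], a) returns [{a,3},{c,1}]:
coordinator a has absorbed the client's history and added its own event.
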
+ ?assertEqual(get_contents(O), get_contents(O2)), + O3 = increment_vclock(O,"a"), + O4 = increment_vclock(O3,"a"), + O5 = update_vclock(O4,O3,"b"), + O3b = from_json(to_json(O3)), + O4b = from_json(to_json(O4)), + O5b = from_json(to_json(O5)), + ?assert(compactdvv:equal(get_vclock(O3,true), get_vclock(O3b,true))), + ?assertEqual(get_contents(O3), get_contents(O3b)), + ?assert(compactdvv:equal(get_vclock(O4,true), get_vclock(O4b,true))), + ?assertEqual(get_contents(O4), get_contents(O4b)), + ?assert(compactdvv:equal(get_vclock(O5,true), get_vclock(O5b,true))), + ?assertEqual(get_contents(O5), get_contents(O5b)). + check_most_recent({V1, T1, D1}, {V2, T2, D2}) -> MD1 = dict:store(<<"X-Riak-Last-Modified">>, T1, D1), From f6d961d509fd3d87f5aa2a0f5dfe869b9c3fc896 Mon Sep 17 00:00:00 2001 From: ricardobcl Date: Mon, 14 Jan 2013 18:28:23 +0000 Subject: [PATCH 18/25] Use lww while comparing timestamps in metadata. --- src/riak_object.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/riak_object.erl b/src/riak_object.erl index 89a9f4b6ff..b0773d7b79 100644 --- a/src/riak_object.erl +++ b/src/riak_object.erl @@ -167,7 +167,7 @@ reconcile(RObjs, AllowMultiple) -> end, SyncedContents = compactdvv:sync(AllContents2), Contents = case AllowMultiple of - false -> compactdvv:lww(SyncedContents); + false -> most_recent_content(SyncedContents); true -> SyncedContents end, HdObj = hd(RObjs), From dd3ac7f20f697b8e4d1b1f1b00331628aa26473b Mon Sep 17 00:00:00 2001 From: ricardobcl Date: Fri, 25 Jan 2013 16:54:51 +0000 Subject: [PATCH 19/25] Support for new causal clock: dvvset. --- src/riak_client.erl | 6 +- src/riak_kv_backup.erl | 2 +- src/riak_kv_delete.erl | 4 +- src/riak_kv_encoding_migrate.erl | 10 +- src/riak_kv_get_core.erl | 2 +- src/riak_kv_get_fsm.erl | 4 +- src/riak_kv_pb_object.erl | 12 +- src/riak_kv_util.erl | 2 +- src/riak_kv_vnode.erl | 16 +-- src/riak_kv_wm_link_walker.erl | 2 +- src/riak_kv_wm_object.erl | 8 +- src/riak_kv_wm_utils.erl | 2 +- src/riak_object.erl | 230 ++++++++++++++++--------------- test/fsm_eqc_util.erl | 2 +- 14 files changed, 156 insertions(+), 146 deletions(-) diff --git a/src/riak_client.erl b/src/riak_client.erl index d92e3a207a..342285c5cc 100644 --- a/src/riak_client.erl +++ b/src/riak_client.erl @@ -254,7 +254,7 @@ delete(Bucket,Key,Options,Timeout) when is_list(Options) -> delete(Bucket,Key,RW,Timeout) -> delete(Bucket,Key,[{rw, RW}], Timeout). -%% @spec delete_vclock(riak_object:bucket(), riak_object:key(), vclock:vclock()) -> +%% @spec delete_vclock(riak_object:bucket(), riak_object:key(), dvvset:clock()) -> %% ok | %% {error, too_many_fails} | %% {error, notfound} | @@ -266,7 +266,7 @@ delete(Bucket,Key,RW,Timeout) -> delete_vclock(Bucket,Key,VClock) -> delete_vclock(Bucket,Key,VClock,[{rw,default}],?DEFAULT_TIMEOUT). -%% @spec delete_vclock(riak_object:bucket(), riak_object:key(), vclock::vclock(), RW :: integer()) -> +%% @spec delete_vclock(riak_object:bucket(), riak_object:key(), dvvset:clock(), RW :: integer()) -> %% ok | %% {error, too_many_fails} | %% {error, notfound} | @@ -280,7 +280,7 @@ delete_vclock(Bucket,Key,VClock,Options) when is_list(Options) -> delete_vclock(Bucket,Key,VClock,RW) -> delete_vclock(Bucket,Key,VClock,[{rw, RW}],?DEFAULT_TIMEOUT). 
-%% @spec delete_vclock(riak_object:bucket(), riak_object:key(), vclock:vclock(), RW :: integer(), +%% @spec delete_vclock(riak_object:bucket(), riak_object:key(), dvvset:clock(), RW :: integer(), %% TimeoutMillisecs :: integer()) -> %% ok | %% {error, too_many_fails} | diff --git a/src/riak_kv_backup.erl b/src/riak_kv_backup.erl index 63d373394f..95649da172 100644 --- a/src/riak_kv_backup.erl +++ b/src/riak_kv_backup.erl @@ -153,7 +153,7 @@ read_and_restore_function(Client, BinTerm) -> %% If the bucket name is an atom, convert it to a binary... make_binary_bucket(Bucket, Key, OriginalObj) when is_atom(Bucket) -> Bucket1 = list_to_binary(atom_to_list(Bucket)), - OriginalContents = riak_object:get_vclock(OriginalObj,true), + OriginalContents = riak_object:get_contents(OriginalObj), % We can't change the bucket name without creating a new object... NewObj = riak_object:new(Bucket1, Key, placeholder), diff --git a/src/riak_kv_delete.erl b/src/riak_kv_delete.erl index d060cc2dc6..96ed3f2da3 100644 --- a/src/riak_kv_delete.erl +++ b/src/riak_kv_delete.erl @@ -66,7 +66,7 @@ delete(ReqId,Bucket,Key,Options,Timeout,Client,ClientId,undefined) -> case C:get(Bucket,Key,[{r,R},{pr,PR},{timeout,Timeout}]) of {ok, OrigObj} -> RemainingTime = Timeout - (riak_core_util:moment() - RealStartTime), - delete(ReqId,Bucket,Key,Options,RemainingTime,Client,ClientId,riak_object:get_vclock(OrigObj, false)); + delete(ReqId,Bucket,Key,Options,RemainingTime,Client,ClientId,riak_object:get_vclock(OrigObj)); {error, notfound} -> ?DTRACE(?C_DELETE_INIT1, [-2], []), Client ! {ReqId, {error, notfound}}; @@ -85,7 +85,7 @@ delete(ReqId,Bucket,Key,Options,Timeout,Client,ClientId,VClock) -> {W, PW, DW} -> Obj0 = riak_object:new(Bucket, Key, <<>>, dict:store(?MD_DELETED, "true", dict:new())), - Tombstone = riak_object:set_vclock(Obj0, VClock), + Tombstone = riak_object:set_vclock(Obj0, VClock), %% same value as current Obj0 {ok,C} = riak:local_client(ClientId), Reply = C:put(Tombstone, [{w,W},{pw,PW},{dw, DW},{timeout,Timeout}]), Client ! {ReqId, Reply}, diff --git a/src/riak_kv_encoding_migrate.erl b/src/riak_kv_encoding_migrate.erl index 782b3a1feb..4e984e506c 100644 --- a/src/riak_kv_encoding_migrate.erl +++ b/src/riak_kv_encoding_migrate.erl @@ -248,7 +248,7 @@ decode_object(RO) -> copy_object(RO, B, K) -> {ok, RC} = riak:local_client(), NO1 = riak_object:new(B, K, <<>>), - NO2 = riak_object:set_contents(NO1, riak_object:get_vclock(RO,true)), + NO2 = riak_object:set_contents(NO1, riak_object:get_contents(RO)), RC:put(NO2). 
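%% A note on the copy_object/3 change above (a sketch with a hypothetical
%% helper name, assuming the dvvset-based riak_object API used in this
%% series): moving the contents wholesale carries the values and their
%% causal history together, whereas rebuilding the object from bare
%% {MD, V} pairs would restart its clock.
%%
%%   copy_keep_clock(RC, RO, B, K) ->
%%       New0 = riak_object:new(B, K, <<>>),
%%       New1 = riak_object:set_contents(New0, riak_object:get_contents(RO)),
%%       RC:put(New1).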
%% Force writes to fail to test failure behavior @@ -307,16 +307,16 @@ test_migration() -> {ok, []} = riak_kv_encoding_migrate:delete_migrated_objects(EObjs), {not_needed, [], []} = riak_kv_encoding_migrate:check_cluster(), - C1 = riak_object:get_contents(O2), + C1 = riak_object:get_md_values(O2), - C2 = riak_object:get_contents(O4), + C2 = riak_object:get_md_values(O4), {ok, MO1} = RC:get(<<"me@mine">>, <<"key">>), - nearly_equal_contents(C1, riak_object:get_contents(MO1)), + nearly_equal_contents(C1, riak_object:get_md_values(MO1)), true = riak_object:descendant(MO1, O2), {ok, MO2} = RC:get(<<"bucket">>, <<"key@">>), - nearly_equal_contents(C2, riak_object:get_contents(MO2)), + nearly_equal_contents(C2, riak_object:get_md_values(MO2)), true = riak_object:descendant(MO2, O4), %% Use precommit hook to test failure scenarios diff --git a/src/riak_kv_get_core.erl b/src/riak_kv_get_core.erl index 734289ea92..1ea7f351ea 100644 --- a/src/riak_kv_get_core.erl +++ b/src/riak_kv_get_core.erl @@ -118,7 +118,7 @@ response(GetCore = #getcore{r = R, num_ok = NumOk, num_notfound = NumNotFound, ok -> Merged; % {ok, MObj} tombstone when DeletedVClock -> - {error, {deleted, riak_object:get_vclock(MObj,false)}}; + {error, {deleted, riak_object:get_vclock(MObj)}}; _ -> % tombstone or notfound {error, notfound} end; diff --git a/src/riak_kv_get_fsm.erl b/src/riak_kv_get_fsm.erl index 745278543e..40d33701d3 100644 --- a/src/riak_kv_get_fsm.erl +++ b/src/riak_kv_get_fsm.erl @@ -466,10 +466,10 @@ update_stats(_, #state{ bkey = {Bucket, _}, tracked_bucket = StatTracked, calcul %% calling term_to_binary/1, but it should be easier on memory, %% especially for objects with large values. calculate_objsize(Bucket, Obj) -> - Contents = riak_object:get_contents(Obj), + Contents = riak_object:get_md_values(Obj), size(Bucket) + size(riak_object:key(Obj)) + - size(term_to_binary(riak_object:get_vclock(Obj,false))) + + size(term_to_binary(riak_object:get_vclock(Obj))) + lists:sum([size(term_to_binary(MD)) + value_size(Value) || {MD, Value} <- Contents]). 
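%% Sizing note for the calculate_objsize/2 change above (the arithmetic is
%% illustrative): with a single dvvset clock per object, the causal
%% metadata is serialized once, via the joined vector, instead of once per
%% sibling as in the first commit of this series. For an object with 3
%% siblings and a 40-byte encoded vector, the clock now contributes 40
%% bytes to the size estimate rather than 120.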
value_size(Value) when is_binary(Value) -> size(Value); diff --git a/src/riak_kv_pb_object.erl b/src/riak_kv_pb_object.erl index cdb1f5bfff..400d4d5a88 100644 --- a/src/riak_kv_pb_object.erl +++ b/src/riak_kv_pb_object.erl @@ -111,11 +111,11 @@ process(#rpbgetreq{bucket=B, key=K, r=R0, pr=PR0, notfound_ok=NFOk, make_option(notfound_ok, NFOk) ++ make_option(basic_quorum, BQ)) of {ok, O} -> - case riak_object:equal_vclock(erlify_rpbvc(VClock),riak_object:get_vclock(O,false)) of + case riak_object:equal_vclock(erlify_rpbvc(VClock),riak_object:get_vclock(O)) of true -> {reply, #rpbgetresp{unchanged = true}, State}; _ -> - Contents = riak_object:get_contents(O), + Contents = riak_object:get_md_values(O), PbContent = case Head of true -> %% Remove all the 'value' fields from the contents @@ -127,7 +127,7 @@ process(#rpbgetreq{bucket=B, key=K, r=R0, pr=PR0, notfound_ok=NFOk, riak_pb_kv_codec:encode_contents(Contents) end, {reply, #rpbgetresp{content = PbContent, - vclock = pbify_rpbvc(riak_object:get_vclock(O,false))}, State} + vclock = pbify_rpbvc(riak_object:get_vclock(O))}, State} end; {error, {deleted, TombstoneVClock}} -> %% Found a tombstone - return its vector clock so it can @@ -146,7 +146,7 @@ process(#rpbputreq{bucket=B, key=K, vclock=PbVC, {ok, _} when NoneMatch -> {error, "match_found", State}; {ok, O} when NotMod -> - case erlify_rpbvc(PbVC) == riak_object:get_vclock(O,false) of + case erlify_rpbvc(PbVC) == riak_object:get_vclock(O) of true -> process(Req#rpbputreq{if_not_modified=undefined, if_none_match=undefined}, @@ -203,7 +203,7 @@ process(#rpbputreq{bucket=B, key=K, vclock=PbVC, content=RpbContent, ok -> {reply, #rpbputresp{}, State}; {ok, Obj} -> - Contents = riak_object:get_contents(Obj), + Contents = riak_object:get_md_values(Obj), PbContents = case ReturnHead of true -> %% Remove all the 'value' fields from the contents @@ -215,7 +215,7 @@ process(#rpbputreq{bucket=B, key=K, vclock=PbVC, content=RpbContent, riak_pb_kv_codec:encode_contents(Contents) end, PutResp = #rpbputresp{content = PbContents, - vclock = pbify_rpbvc(riak_object:get_vclock(Obj,false)), + vclock = pbify_rpbvc(riak_object:get_vclock(Obj)), key = ReturnKey }, {reply, PutResp, State}; diff --git a/src/riak_kv_util.erl b/src/riak_kv_util.erl index 107017b246..e83ad032b5 100644 --- a/src/riak_kv_util.erl +++ b/src/riak_kv_util.erl @@ -68,7 +68,7 @@ is_x_deleted(Obj) -> %% deleted. Return is the atom 'undefined' if all contents %% are marked deleted, or the input Obj if any of them are not. 
obj_not_deleted(Obj) -> - case [{M, V} || {M, V} <- riak_object:get_contents(Obj), + case [{M, V} || {M, V} <- riak_object:get_md_values(Obj), dict:is_key(<<"X-Riak-Deleted">>, M) =:= false] of [] -> undefined; _ -> Obj diff --git a/src/riak_kv_vnode.erl b/src/riak_kv_vnode.erl index a15bc124a1..8b0e96a21e 100644 --- a/src/riak_kv_vnode.erl +++ b/src/riak_kv_vnode.erl @@ -814,6 +814,9 @@ prepare_put(#state{vnodeid=VId, {{true, ObjToStore}, PutArgs#putargs{index_specs=IndexSpecs, is_index=IndexBackend}}; {ok, Val, _UpdModState} -> OldObj = binary_to_term(Val), + + io:format("~nO2:~p~n",[OldObj]), + io:format("~nO3:~p~n",[RObj]), case put_merge(Coord, LWW, OldObj, RObj, VId, StartTime) of {oldobj, OldObj1} -> {{false, OldObj1}, PutArgs}; @@ -883,13 +886,7 @@ enforce_allow_mult(Obj, BProps) -> put_merge(false, true, _CurObj, UpdObj, _VId, _StartTime) -> % coord=false, LWW=true {newobj, UpdObj}; put_merge(false, false, CurObj, UpdObj, _VId, _StartTime) -> % coord=false, LWW=false - ResObj = riak_object:syntactic_merge(CurObj, UpdObj), - case riak_object:equal_vclock(ResObj,CurObj) of - true -> - {oldobj, CurObj}; - false -> - {newobj, ResObj} - end; + {newobj, riak_object:syntactic_merge(CurObj, UpdObj)}; put_merge(true, true, _CurObj, UpdObj, VId, StartTime) -> % coord=true, LWW=true {newobj, riak_object:increment_vclock(UpdObj, VId, StartTime)}; put_merge(true, false, CurObj, UpdObj, VId, StartTime) -> @@ -1079,8 +1076,8 @@ do_get_vclocks(KeyList,_State=#state{mod=Mod,modstate=ModState}) -> %% @private do_get_vclock({Bucket, Key}, Mod, ModState) -> case Mod:get(Bucket, Key, ModState) of - {error, not_found, _UpdModState} -> riak_object:new_vclock(); - {ok, Val, _UpdModState} -> riak_object:get_vclock(binary_to_term(Val),false) + {error, not_found, _UpdModState} -> []; %riak_object:new_vclock(); + {ok, Val, _UpdModState} -> riak_object:get_vclock(binary_to_term(Val)) end. 
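%% A note on the clock/vector split used above (assuming the dvvset API as
%% wired up later in this patch): get_contents/1 returns the full dvvset
%% clock, values plus causal information, while get_vclock/1 returns only
%% its causal summary via dvvset:join/1.
%%
%%   C  = riak_object:get_contents(Obj),  %% values + causality
%%   VV = riak_object:get_vclock(Obj),    %% dvvset:join(C): causality only
%%
%% VV is what do_get_vclock/3 hands back and what the HTTP and PB
%% endpoints encode for clients; the full clock stays server-side.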
%% @private
@@ -1410,6 +1407,7 @@ backend_with_known_key(BackendMod) ->
     B = <<"f">>,
     K = <<"b">>,
     O = riak_object:new(B, K, <<"z">>),
+    io:format("~nO1:~p~n",[O]),
     {noreply, S2} = handle_command(?KV_PUT_REQ{bkey={B,K},
                                                object=O,
                                                req_id=123,
diff --git a/src/riak_kv_wm_link_walker.erl b/src/riak_kv_wm_link_walker.erl
index a8b6649c5b..5ddd388ed1 100644
--- a/src/riak_kv_wm_link_walker.erl
+++ b/src/riak_kv_wm_link_walker.erl
@@ -403,7 +403,7 @@ multipart_encode_body(NestedResults, Ctx) when is_list(NestedResults) ->
 multipart_encode_body(RiakObject, Ctx) ->
     APIVersion = Ctx#ctx.api_version,
     Prefix = Ctx#ctx.prefix,
-    [{MD, V}|Rest] = riak_object:get_contents(RiakObject),
+    [{MD, V}|Rest] = riak_object:get_md_values(RiakObject),
     {VHead, Vclock} = riak_kv_wm_utils:vclock_header(RiakObject),
     [VHead,": ",Vclock,"\r\n",
diff --git a/src/riak_kv_wm_object.erl b/src/riak_kv_wm_object.erl
index f30f34543c..fb991fb836 100644
--- a/src/riak_kv_wm_object.erl
+++ b/src/riak_kv_wm_object.erl
@@ -757,7 +757,7 @@ produce_multipart_body(RD, Ctx=#ctx{doc={ok, Doc}, bucket=B, prefix=P}) ->
     Boundary = riak_core_util:unique_id_62(),
     {[[["\r\n--",Boundary,"\r\n",
         riak_kv_wm_utils:multipart_encode_body(P, B, Content, APIVersion)]
-       || Content <- riak_object:get_contents(Doc)],
+       || Content <- riak_object:get_md_values(Doc)],
      "\r\n--",Boundary,"--\r\n"],
     wrq:set_resp_header(?HEAD_CTYPE,
                         "multipart/mixed; boundary="++Boundary,
@@ -774,7 +774,7 @@ produce_multipart_body(RD, Ctx=#ctx{doc={ok, Doc}, bucket=B, prefix=P}) ->
 select_doc(#ctx{doc={ok, Doc}, vtag=Vtag}) ->
     case riak_object:get_update_value(Doc) of
         undefined ->
-            case riak_object:get_contents(Doc) of
+            case riak_object:get_md_values(Doc) of
                 [Single] -> Single;
                 Mult ->
                     case lists:dropwhile(
@@ -804,7 +804,7 @@ encode_vclock_header(RD, #ctx{doc={error, {deleted, VClock}}}) ->
 %%      into something suitable for an HTTP header
 vclock_header(Doc) ->
     {?HEAD_VCLOCK,
-     encode_vclock(riak_object:get_vclock(Doc, false))}.
+     encode_vclock(riak_object:get_vclock(Doc))}.

 encode_vclock(VClock) ->
     binary_to_list(base64:encode(zlib:zip(term_to_binary(VClock)))).
@@ -864,7 +864,7 @@ generate_etag(RD, Ctx) ->
             {dict:fetch(?MD_VTAG, MD), RD, Ctx};
         multiple_choices ->
             {ok, Doc} = Ctx#ctx.doc,
-            <<ETag:128/integer>> = crypto:md5(term_to_binary(riak_object:get_vclock(Doc,false))),
+            <<ETag:128/integer>> = crypto:md5(term_to_binary(riak_object:get_vclock(Doc))),
             {riak_core_util:integer_to_list(ETag, 62), RD, Ctx}
     end.
diff --git a/src/riak_kv_wm_utils.erl b/src/riak_kv_wm_utils.erl
index 5b4e185490..3d3000e2e7 100644
--- a/src/riak_kv_wm_utils.erl
+++ b/src/riak_kv_wm_utils.erl
@@ -149,7 +149,7 @@ multipart_encode_body(Prefix, Bucket, {MD, V}, APIVersion) ->
 %%      into something suitable for an HTTP header
 vclock_header(Doc) ->
     {?HEAD_VCLOCK,
-     encode_vclock(riak_object:get_vclock(Doc,false))}.
+     encode_vclock(riak_object:get_vclock(Doc))}.

 encode_vclock(VClock) ->
     binary_to_list(base64:encode(zlib:zip(term_to_binary(VClock)))).
diff --git a/src/riak_object.erl b/src/riak_object.erl
index b0773d7b79..a9fdd0ef2e 100644
--- a/src/riak_object.erl
+++ b/src/riak_object.erl
@@ -43,7 +43,7 @@
 -record(r_object, {
           bucket :: bucket(),
           key :: key(),
-          contents :: compactdvv:clock(), % [{id, count, [riak_content()]}]
+          contents :: dvvset:clock(), %% a container for riak_content()'s, with built-in causal information
           updatemetadata=dict:store(clean, true, dict:new()) :: dict(),
           updatevalue :: term()
          }).
@@ -55,16 +55,15 @@
 -type index_value() :: integer() | binary().

 -define(MAX_KEY_SIZE, 65536).
--define(NULLID,null).
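%% Background for the new contents type (this is the shape that the
%% to_json/1 and from_json/1 changes later in this patch assume; the
%% dvvset module itself lives in the riak_core fork this series depends
%% on): a dvvset:clock() is a pair
%%
%%   {Entries, AnonymousValues}
%%     Entries         :: [{Id, Counter, Values}]  %% one entry per replica id
%%     AnonymousValues :: [Value]                  %% values without a dot
%%
%% which is exactly the split serialized as "causal_values" and
%% "anonym_values" in the JSON code below.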
-export([new/3, new/4, ensure_robject/1, equal/2, new_vclock/0, equal_vclock/2]). -export([increment_vclock/2, increment_vclock/3, update_vclock/4, update_vclock/3]). -export([reconcile/2, descendant/2, strict_descendant/2, key/1]). -export([get_metadata/1, get_metadatas/1, get_values/1, get_value/1]). --export([get_vclock/2, update_value/2, update_metadata/2, bucket/1, value_count/1]). +-export([get_vclock/1, update_value/2, update_metadata/2, bucket/1, value_count/1]). -export([get_update_metadata/1, get_update_value/1, get_contents/1]). -export([apply_updates/1, syntactic_merge/2, compare_content_dates/2, set_lww/1]). --export([to_json/1, from_json/1]). +-export([get_md_values/1,to_json/1, from_json/1]). -export([index_specs/1, diff_index_specs/2]). -export([set_contents/2, set_vclock/2]). %% INTERNAL, only for riak_* @@ -90,18 +89,18 @@ new(B, K, V, MD) when is_binary(B), is_binary(K) -> false -> case MD of no_initial_metadata -> - Contents = compactdvv:new(?NULLID,[#r_content{metadata=dict:new(), value=V}]), + Contents = dvvset:new(#r_content{metadata=dict:new(), value=V}), #r_object{bucket=B,key=K, contents=Contents}; _ -> - Contents = compactdvv:new(?NULLID,[#r_content{metadata=MD, value=V}]), + Contents = dvvset:new(#r_content{metadata=MD, value=V}), #r_object{bucket=B,key=K,updatemetadata=MD, contents=Contents} end end. --spec new_vclock() -> compactdvv:clock(). -new_vclock() -> compactdvv:new(?NULLID,[]). +-spec new_vclock() -> dvvset:clock(). +new_vclock() -> dvvset:new2([]). %% Ensure the incoming term is a riak_object. -spec ensure_robject(any()) -> riak_object(). @@ -109,15 +108,15 @@ ensure_robject(Obj = #r_object{}) -> Obj. -spec strict_descendant(riak_object(), riak_object()) -> boolean(). strict_descendant(#r_object{contents=C1},#r_object{contents=C2}) -> - compactdvv:strict_descendant(C1,C2). + dvvset:less(C2,C1). -spec descendant(riak_object(), riak_object()) -> boolean(). descendant(#r_object{contents=C1},#r_object{contents=C2}) -> - compactdvv:equal(C1,C2) orelse compactdvv:strict_descendant(C1,C2). + dvvset:equal(C1,C2) orelse dvvset:less(C2,C1). -spec equal_vclock(riak_object(), riak_object()) -> boolean(). equal_vclock(#r_object{contents=C1},#r_object{contents=C2}) -> - compactdvv:equal(C1,C2). + dvvset:equal(C1,C2). -spec equal(riak_object(), riak_object()) -> boolean(). %% @doc Deep (expensive) comparison of Riak objects. @@ -132,8 +131,8 @@ equal2(Obj1,Obj2) -> (UM1 =:= UM2) andalso (Obj1#r_object.updatevalue =:= Obj2#r_object.updatevalue) andalso begin - Cont1 = lists:sort(get_contents(Obj1)), - Cont2 = lists:sort(get_contents(Obj2)), + Cont1 = lists:sort(get_md_values(Obj1)), + Cont2 = lists:sort(get_md_values(Obj2)), equal_contents(Cont1,Cont2) end. equal_contents([],[]) -> true; @@ -157,15 +156,8 @@ equal_contents([{M1,V1}|R1],[{M2,V2}|R2]) -> % X-Riak-Last-Modified header. -spec reconcile([riak_object()], boolean()) -> riak_object(). 
reconcile(RObjs, AllowMultiple) -> - %% filter clocks with null as id, except if there aren't any non-null ids - AllContents = - [lists:filter(fun ({I,_,_}) -> I =/= ?NULLID end, O#r_object.contents) || O <- RObjs], - AllContents2 = - case lists:append(AllContents) of - [] -> [O#r_object.contents || O <- RObjs]; - _ -> AllContents - end, - SyncedContents = compactdvv:sync(AllContents2), + AllContents = [O#r_object.contents || O <- RObjs], + SyncedContents = dvvset:sync(AllContents), Contents = case AllowMultiple of false -> most_recent_content(SyncedContents); true -> SyncedContents @@ -177,10 +169,6 @@ reconcile(RObjs, AllowMultiple) -> -spec syntactic_merge(riak_object(), riak_object()) -> riak_object(). syntactic_merge(CurrentObj, NewObj) -> - %% Paranoia in case objects were incorrectly stored - %% with update information. Vclock is not updated - %% but since no data is lost the objects will be - %% fixed if rewritten. UCurr = apply_updates(CurrentObj), UNew = apply_updates(NewObj), reconcile([UNew, UCurr], true). @@ -192,7 +180,7 @@ set_lww(Object=#r_object{contents=C}) -> -spec most_recent_content(riak_content()) -> riak_content(). most_recent_content(Contents) -> - compactdvv:lww(fun riak_object:compare_content_dates/2, Contents). + dvvset:lww(fun riak_object:compare_content_dates/2, Contents). -spec compare_content_dates(riak_content(), riak_content()) -> boolean(). compare_content_dates(C1,C2) -> @@ -225,7 +213,7 @@ compare_content_dates(C1,C2) -> % update_metadata() calls) to this riak_object. -spec apply_updates(riak_object()) -> riak_object(). apply_updates(Object=#r_object{}) -> - CurrentContents = get_vclock(Object,true), + CurrentContents = get_contents(Object), UpdatedContents = case Object#r_object.updatevalue of undefined -> @@ -233,7 +221,7 @@ apply_updates(Object=#r_object{}) -> {ok,_} -> CurrentContents; %% no changes in values or metadata error -> NewMD = dict:erase(clean,Object#r_object.updatemetadata), - compactdvv:map_values( + dvvset:map( fun (R) -> #r_content{metadata=NewMD, value=R#r_content.value} end, CurrentContents) end; @@ -246,7 +234,11 @@ apply_updates(Object=#r_object{}) -> end, VL = Object#r_object.updatevalue, NewR = #r_content{metadata=MD,value=VL}, - compactdvv:set_value(CurrentContents,NewR) + %% extract the causal information + VersionVector = dvvset:join(CurrentContents), + %% construct a new clock with the same causal information as the previous, + %% but with the new value only. + dvvset:new(VersionVector,NewR) end, Object#r_object{contents=UpdatedContents, updatemetadata=dict:store(clean, true, dict:new()), @@ -261,51 +253,51 @@ bucket(#r_object{bucket=Bucket}) -> Bucket. key(#r_object{key=Key}) -> Key. %% @doc Return the logical clock for this riak_object. --spec get_vclock(riak_object(), boolean()) -> compactdvv:clock(). -get_vclock(#r_object{contents=Clock}, _WithContents = true) -> Clock; -get_vclock(#r_object{contents=Clock}, _WithContents = false) -> compactdvv:join(Clock). - +-spec get_vclock(riak_object()) -> dvvset:vector(). +get_vclock(#r_object{contents=Clock}) -> dvvset:join(Clock). %% @doc Return the number of values (siblings) of this riak_object. -spec value_count(riak_object()) -> non_neg_integer(). -value_count(#r_object{contents=Contents}) -> compactdvv:value_count(Contents). +value_count(#r_object{contents=Contents}) -> dvvset:size(Contents). %% @doc Return the contents (a list of {metadata, value} tuples) for %% this riak_object. --spec get_contents(riak_object()) -> [riak_content()]. 
-get_contents(#r_object{contents=Contents}) -> - Rconts = compactdvv:get_values(Contents), - [{C#r_content.metadata, C#r_content.value} || - C <- Rconts]. +-spec get_md_values(riak_object()) -> [{dict(), term()}]. +get_md_values(#r_object{contents=Contents}) -> + [{C#r_content.metadata, C#r_content.value} || C <- dvvset:values(Contents)]. + +%% @doc Return the contents (dvvset:clock()) as is. +-spec get_contents(riak_object()) -> dvvset:clock(). +get_contents(#r_object{contents=Contents}) -> Contents. %% @doc Assert that this riak_object has no siblings and return its associated %% metadata. This function will fail with a badmatch error if the %% object has siblings (value_count() > 1). -spec get_metadata(riak_object()) -> dict(). get_metadata(#r_object{contents=Contents}) -> - % this blows up intentionally (badmatch) if more than one content value! - 1 = compactdvv:value_count(Contents), - V = compactdvv:get_last_value(Contents), + % this blows up intentionally (badmatch) if more than one content value! + 1 = dvvset:size(Contents), + V = dvvset:last(fun riak_object:compare_content_dates/2, Contents), V#r_content.metadata. %% @doc Return a list of the metadata values for this riak_object. -spec get_metadatas(riak_object()) -> [dict()]. get_metadatas(#r_object{contents=C}) -> - [V#r_content.metadata || V <- compactdvv:get_values(C)]. + [V#r_content.metadata || V <- dvvset:values(C)]. %% @doc Return a list of object values for this riak_object. -spec get_values(riak_object()) -> [value()]. get_values(#r_object{contents=C}) -> - [V#r_content.value || V <- compactdvv:get_values(C)]. + [V#r_content.value || V <- dvvset:values(C)]. %% @doc Assert that this riak_object has no siblings and return its associated %% value. This function will fail with a badmatch error if the object %% has siblings (value_count() > 1). -spec get_value(riak_object()) -> value(). get_value(#r_object{contents=C}) -> - % this blows up intentionally (badmatch) if more than one content value! - 1 = compactdvv:value_count(C), - V = compactdvv:get_last_value(C), + % this blows up intentionally (badmatch) if more than one content value! + 1 = dvvset:size(C), + V = dvvset:last(fun riak_object:compare_content_dates/2, C), V#r_content.value. %% @doc Set the updated metadata of an object to M. @@ -325,16 +317,16 @@ get_update_metadata(#r_object{updatemetadata=UM}) -> UM. -spec get_update_value(riak_object()) -> value(). get_update_value(#r_object{updatevalue=UV}) -> UV. -%% @doc INTERNAL USE ONLY. Set the vclock of riak_object O to V. --spec set_vclock(riak_object(), compactdvv:clock()) -> riak_object(). -set_vclock(Object=#r_object{contents=Contents}, Clock) -> - C = case Object#r_object.updatevalue of - undefined -> - V = compactdvv:get_last_value(Contents), - compactdvv:set_value(Clock,V); - _ -> compactdvv:set_value(Clock,Object#r_object.updatevalue) - end, - Object#r_object{contents=C}. +%% @doc Set a clock of the riak_object using a version vector, +%% obtained in a get_vclock. +-spec set_vclock(riak_object(), dvvset:vector()) -> riak_object(). +set_vclock(Object, Clock) -> + Vs = get_values(apply_updates(Object)), + %% extract the causal information + VersionVector = dvvset:join(Clock), + %% set the contents to a new clock with the same causal information + %% as the version vector, but with the new list of values. + Object#r_object{contents=dvvset:new2(VersionVector,Vs)}. %% @doc INTERNAL USE ONLY. Set the contents of riak_object %% to the Contents. 
Normal clients should use the @@ -352,17 +344,11 @@ increment_vclock(Object=#r_object{}, Id, _TS) -> %% @doc Increment the entry for Id in O's vclock (ignore timestamp since we are not pruning). -spec increment_vclock(riak_object(), any()) -> riak_object(). increment_vclock(Object=#r_object{contents=Conts}, Id) -> - Conts2 = lists:filter(fun ({I,_,_}) -> I =/= ?NULLID end, Conts), - Value = compactdvv:get_last_value(Conts), - NewConts = compactdvv:syncupdate(Conts2, Id, Value), - set_contents(Object, NewConts). + Object#r_object{contents=dvvset:update(Conts, Id)}. -spec update_vclock(riak_object(), riak_object(), any()) -> riak_object(). update_vclock(ObjectC=#r_object{contents=ContC}, #r_object{contents=ContR}, Id) -> - ContC2 = lists:filter(fun ({I,_,_}) -> I =/= ?NULLID end, ContC), - Value = compactdvv:get_last_value(ContC), - NewConts = compactdvv:syncupdate(ContC2, ContR, Id, Value), - set_contents(ObjectC, NewConts). + ObjectC#r_object{contents=dvvset:update(ContC, ContR, Id)}. -spec update_vclock(riak_object(), riak_object(), any(), non_neg_integer()) -> riak_object(). update_vclock(ObjectC=#r_object{}, ObjectR=#r_object{}, Id, _TS) -> @@ -428,29 +414,38 @@ assemble_index_specs(Indexes, IndexOp) -> %% @doc Converts a riak_object into its JSON equivalent -spec to_json(riak_object()) -> {struct, list(any())}. to_json(Obj=#r_object{}) -> - {struct, - [ - {<<"bucket">>, bucket(Obj)}, + {CV,AV} = get_contents(Obj), + {struct, + [ {<<"bucket">>, bucket(Obj)}, {<<"key">>, key(Obj)}, {<<"contents">>, - [ - {struct, - [ - {<<"id">>, riak_kv_wm_utils:encode_vclock(Id)}, - {<<"counter">>, riak_kv_wm_utils:encode_vclock(Counter)}, - {<<"values">>, - [ - {struct, - [ - {<<"metadata">>, jsonify_metadata(Rcont#r_content.metadata)}, - {<<"value">>, Rcont#r_content.value} - ] - } || Rcont <- Values - ] - } - ] - } || {Id, Counter, Values} <- get_vclock(Obj,true) - ] + { struct, + [ {<<"causal_values">>, + [ {struct, + [ {<<"id">>, riak_kv_wm_utils:encode_vclock(Id)}, + {<<"counter">>, riak_kv_wm_utils:encode_vclock(Counter)}, + {<<"values">>, + [ {struct, + [ {<<"metadata">>, jsonify_metadata(Rcont#r_content.metadata)}, + {<<"value">>, Rcont#r_content.value} + ] + } || Rcont <- Values + ] + } + ] + } || {Id, Counter, Values} <- CV + ] + }, + {<<"anonym_values">>, + [ {struct, + [ {<<"metadata">>, jsonify_metadata(Rcont#r_content.metadata)}, + {<<"value">>, Rcont#r_content.value} + ] + } || Rcont <- AV + ] + } + ] + } } ] }. @@ -461,9 +456,9 @@ from_json({struct, Obj}) -> from_json(Obj) -> Bucket = proplists:get_value(<<"bucket">>, Obj), Key = proplists:get_value(<<"key">>, Obj), - Contents = proplists:get_value(<<"contents">>, Obj), + C = proplists:get_value(<<"contents">>, Obj), RObj0 = new(Bucket, Key, <<"">>), - set_contents(RObj0, dejsonify_contents(Contents, [])). + set_contents(RObj0, dejsonify_contents(C)). jsonify_metadata(MD) -> MDJS = fun({LastMod, Now={_,_,_}}) -> @@ -523,6 +518,13 @@ jsonify_proplist(List) -> end, dict:new(), List)). + +dejsonify_contents({struct, [{<<"causal_values">>, V},{<<"anonym_values">>, A}]}) -> + V1 = dejsonify_contents(V,[]), + A1 = dejsonify_values(A), + {V1,A1}. 
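%% For reference, a sketch of the JSON shape that to_json/1 and
%% dejsonify_contents/1 round-trip (field contents illustrative):
%%
%%   {"bucket":"b", "key":"k",
%%    "contents":{
%%        "causal_values":[{"id":"...", "counter":"...",
%%                          "values":[{"metadata":{...}, "value":"v1"}]}],
%%        "anonym_values":[{"metadata":{...}, "value":"v0"}]}}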
+ + dejsonify_contents([], Accum) -> lists:reverse(Accum); dejsonify_contents([{struct, [ @@ -614,7 +616,7 @@ merge2_test() -> O1 = riak_object:increment_vclock(object_test(), node1), O2 = riak_object:increment_vclock(riak_object:new(B,K,V), node2), O3 = riak_object:syntactic_merge(O1, O2), - [node1, node2] = lists:sort([N || {N,_,_} <- riak_object:get_vclock(O3,false)]), + [node1, node2] = dvvset:ids(riak_object:get_contents(O3)), 2 = riak_object:value_count(O3). merge3_test() -> @@ -661,8 +663,9 @@ inequality_value_test() -> inequality_multivalue_test() -> O1 = riak_object:new(<<"test">>, <<"a">>, "value"), - [C] = riak_object:get_vclock(O1,true), - O1p = riak_object:set_contents(O1, [C,C]), + C = riak_object:get_contents(O1), + 1 = riak_object:value_count(O1), + O1p = riak_object:set_contents(O1, {[C,C],[]}), false = riak_object:equal(O1, O1p), false = riak_object:equal(O1p, O1). @@ -776,26 +779,35 @@ jsonify_round_trip_test() -> {?MD_INDEX, Indexes}, {?MD_LINKS, Links}]), O = riak_object:new(<<"b">>, <<"k">>, <<"{\"a\":1}">>, MD), + [V] = dvvset:values(get_contents(O)), O2 = from_json(to_json(O)), ?assertEqual(bucket(O), bucket(O2)), ?assertEqual(key(O), key(O2)), - ?assert(compactdvv:equal(get_vclock(O,true), get_vclock(O2,true))), ?assertEqual(lists:sort(Meta), lists:sort(dict:fetch(?MD_USERMETA, get_metadata(O2)))), ?assertEqual(Links, dict:fetch(?MD_LINKS, get_metadata(O2))), ?assertEqual(lists:sort(Indexes), lists:sort(index_data(O2))), - ?assertEqual(get_contents(O), get_contents(O2)), O3 = increment_vclock(O,"a"), - O4 = increment_vclock(O3,"a"), - O5 = update_vclock(O4,O3,"b"), - O3b = from_json(to_json(O3)), - O4b = from_json(to_json(O4)), + O3a = set_contents(O3, dvvset:new(dvvset:join(get_contents(O3)),V)), + O4 = increment_vclock(O3a,"a"), + O4a = set_contents(O4, dvvset:new(dvvset:join(get_contents(O4)),V)), + O5 = update_vclock(O4a,O3a,"b"), + O3b = from_json(to_json(O3a)), + O4b = from_json(to_json(O4a)), O5b = from_json(to_json(O5)), - ?assert(compactdvv:equal(get_vclock(O3,true), get_vclock(O3b,true))), - ?assertEqual(get_contents(O3), get_contents(O3b)), - ?assert(compactdvv:equal(get_vclock(O4,true), get_vclock(O4b,true))), - ?assertEqual(get_contents(O4), get_contents(O4b)), - ?assert(compactdvv:equal(get_vclock(O5,true), get_vclock(O5b,true))), - ?assertEqual(get_contents(O5), get_contents(O5b)). + io:format("~nO1:~p~n",[O3a]), + io:format("~nO3:~p~n",[O3b]), + ?assert(dvvset:equal(get_contents(O), get_contents(O2))), + ?assert(dvvset:equal(get_contents(O3a), get_contents(O3b))), + ?assert(dvvset:equal(get_contents(O4a), get_contents(O4b))), + ?assert(dvvset:equal(get_contents(O5), get_contents(O5b))), + ?assertEqual(get_contents(O), get_contents(O2)), + ?assertEqual(get_contents(O3a), get_contents(O3b)), + ?assertEqual(get_contents(O4a), get_contents(O4b)), + ?assertEqual(get_contents(O5), get_contents(O5b)), + ?assertEqual(get_md_values(O), get_md_values(O2)), + ?assertEqual(get_md_values(O3a), get_md_values(O3b)), + ?assertEqual(get_md_values(O4a), get_md_values(O4b)), + ?assertEqual(get_md_values(O5), get_md_values(O5b)). 
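%% Note on the paired assertions above: dvvset:equal/2 compares the two
%% clocks causally, while the ?assertEqual checks additionally pin the
%% exact Erlang term, so the JSON round trip has to be representation
%% faithful, not merely causally equivalent. The minimal pattern:
%%
%%   O  = riak_object:new(<<"b">>, <<"k">>, <<"v">>),
%%   Ob = from_json(to_json(O)),
%%   true = dvvset:equal(get_contents(O), get_contents(Ob)),
%%   true = (get_contents(O) =:= get_contents(Ob)).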
check_most_recent({V1, T1, D1}, {V2, T2, D2}) ->
@@ -805,15 +817,15 @@ check_most_recent({V1, T1, D1}, {V2, T2, D2}) ->
     O1 = riak_object:new(<<"test">>, <<"a">>, V1, MD1),
     O2 = riak_object:new(<<"test">>, <<"a">>, V2, MD2),

-    C1 = hd(O1#r_object.contents),
-    C2 = hd(O2#r_object.contents),
+    C1 = (dvvset:last(fun riak_object:compare_content_dates/2, get_contents(O1))),
+    C2 = (dvvset:last(fun riak_object:compare_content_dates/2, get_contents(O2))),

-    C3 = most_recent_content([C1, C2]),
-    C4 = most_recent_content([C2, C1]),
+    C3 = most_recent_content(dvvset:new2([C1, C2])),
+    C4 = most_recent_content(dvvset:new2([C2, C1])),

     ?assertEqual(C3, C4),

-    (compactdvv:get_last_value(C3))#r_content.value.
+    (dvvset:last(fun riak_object:compare_content_dates/2, C3))#r_content.value.

 determinstic_most_recent_test() ->
diff --git a/test/fsm_eqc_util.erl b/test/fsm_eqc_util.erl
index 6b14874268..300e564bdb 100644
--- a/test/fsm_eqc_util.erl
+++ b/test/fsm_eqc_util.erl
@@ -141,7 +141,7 @@ build_riak_obj(B,K,Vc,Val,tombstone) ->
         add_tombstone(Obj).

 add_tombstone(Obj) ->
-    [{M,V}] = riak_object:get_contents(Obj),
+    [{M,V}] = riak_object:get_md_values(Obj),
     NewM = dict:store(<<"X-Riak-Deleted">>, true, M),
     riak_object:set_contents(Obj, [{NewM, V}]).

From 9e96b6d0f975391cec76dbe9953c25efd55b006b Mon Sep 17 00:00:00 2001
From: ricardobcl
Date: Fri, 25 Jan 2013 18:53:29 +0000
Subject: [PATCH 20/25] Corrected a bug in riak_object:set_vclock and changed
 riak_object:new_vclock to return an empty version vector.

---
 src/riak_kv_vnode.erl | 2 +-
 src/riak_object.erl | 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/riak_kv_vnode.erl b/src/riak_kv_vnode.erl
index 8b0e96a21e..2edfb0bfbf 100644
--- a/src/riak_kv_vnode.erl
+++ b/src/riak_kv_vnode.erl
@@ -1076,7 +1076,7 @@ do_get_vclocks(KeyList,_State=#state{mod=Mod,modstate=ModState}) ->
 %% @private
 do_get_vclock({Bucket, Key}, Mod, ModState) ->
     case Mod:get(Bucket, Key, ModState) of
-        {error, not_found, _UpdModState} -> []; %riak_object:new_vclock();
+        {error, not_found, _UpdModState} -> riak_object:new_vclock();
         {ok, Val, _UpdModState} -> riak_object:get_vclock(binary_to_term(Val))
     end.
diff --git a/src/riak_object.erl b/src/riak_object.erl
index a9fdd0ef2e..bc783769d2 100644
--- a/src/riak_object.erl
+++ b/src/riak_object.erl
@@ -100,7 +100,7 @@ new(B, K, V, MD) when is_binary(B), is_binary(K) ->
 end.

 -spec new_vclock() -> dvvset:clock().
-new_vclock() -> dvvset:new2([]).
+new_vclock() -> [].

 %% Ensure the incoming term is a riak_object.
 -spec ensure_robject(any()) -> riak_object().
@@ -321,9 +321,9 @@ get_update_value(#r_object{updatevalue=UV}) -> UV.
 %% obtained in a get_vclock.
 -spec set_vclock(riak_object(), dvvset:vector()) -> riak_object().
 set_vclock(Object, Clock) ->
-    Vs = get_values(apply_updates(Object)),
+    Vs = dvvset:values(get_contents(apply_updates(Object))),
     %% extract the causal information
-    VersionVector = dvvset:join(Clock),
+    %%VersionVector = dvvset:join(Clock),
     %% set the contents to a new clock with the same causal information
     %% as the version vector, but with the new list of values.
     Object#r_object{contents=dvvset:new2(VersionVector,Vs)}.
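Note that, as committed, set_vclock/2 above still references VersionVector after its
binding was commented out, so riak_object.erl does not compile until the next patch
substitutes Clock directly. With new_vclock/0 returning an empty version vector, the
intended client-visible cycle looks like this (a minimal sketch, not code from the
patches; C is a riak_client):

    %% read: the stored dvvset clock is summarized into a version vector
    {ok, O0} = C:get(B, K),
    Ctx = riak_object:get_vclock(O0),
    %% write: the new value goes back under the context that was read
    O1 = riak_object:set_vclock(riak_object:new(B, K, NewVal), Ctx),
    ok = C:put(O1).

This is the same pattern riak_kv_delete.erl uses in PATCH 19 when it writes a
tombstone under the clock of the object being deleted.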
From 33fefaf60c57e4ced338af38459b20c1c3e3d38d Mon Sep 17 00:00:00 2001 From: ricardobcl Date: Mon, 28 Jan 2013 11:08:59 +0000 Subject: [PATCH 21/25] Corrected syntax error in riak_object:set_vclock/2 --- src/riak_object.erl | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/riak_object.erl b/src/riak_object.erl index bc783769d2..b22ef8e6e3 100644 --- a/src/riak_object.erl +++ b/src/riak_object.erl @@ -322,11 +322,9 @@ get_update_value(#r_object{updatevalue=UV}) -> UV. -spec set_vclock(riak_object(), dvvset:vector()) -> riak_object(). set_vclock(Object, Clock) -> Vs = dvvset:values(get_contents(apply_updates(Object))), - %% extract the causal information - %%VersionVector = dvvset:join(Clock), %% set the contents to a new clock with the same causal information %% as the version vector, but with the new list of values. - Object#r_object{contents=dvvset:new2(VersionVector,Vs)}. + Object#r_object{contents=dvvset:new2(Clock,Vs)}. %% @doc INTERNAL USE ONLY. Set the contents of riak_object %% to the Contents. Normal clients should use the From f7842002098555f313d79270af6aaa2807988d4e Mon Sep 17 00:00:00 2001 From: ricardobcl Date: Fri, 15 Feb 2013 16:42:51 +0000 Subject: [PATCH 22/25] Updated calls to functions from dvvset.erl in riak_object.erl --- rebar.config | 2 +- src/riak_object.erl | 16 ++++++++-------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/rebar.config b/rebar.config index c8b0967129..75de49b972 100644 --- a/rebar.config +++ b/rebar.config @@ -9,7 +9,7 @@ ]}. {deps, [ - {riak_core, ".*", {git, "git://github.com/ricardobcl/riak_core", "compactdvv"}}, + {riak_core, ".*", {git, "git://github.com/ricardobcl/riak_core", "dvvset"}}, {erlang_js, ".*", {git, "git://github.com/basho/erlang_js", "master"}}, {bitcask, ".*", {git, "git://github.com/basho/bitcask", "master"}}, {merge_index, ".*", {git, "git://github.com/basho/merge_index", diff --git a/src/riak_object.erl b/src/riak_object.erl index b22ef8e6e3..8290ab9571 100644 --- a/src/riak_object.erl +++ b/src/riak_object.erl @@ -89,11 +89,11 @@ new(B, K, V, MD) when is_binary(B), is_binary(K) -> false -> case MD of no_initial_metadata -> - Contents = dvvset:new(#r_content{metadata=dict:new(), value=V}), + Contents = dvvset:new([#r_content{metadata=dict:new(), value=V}]), #r_object{bucket=B,key=K, contents=Contents}; _ -> - Contents = dvvset:new(#r_content{metadata=MD, value=V}), + Contents = dvvset:new([#r_content{metadata=MD, value=V}]), #r_object{bucket=B,key=K,updatemetadata=MD, contents=Contents} end @@ -238,7 +238,7 @@ apply_updates(Object=#r_object{}) -> VersionVector = dvvset:join(CurrentContents), %% construct a new clock with the same causal information as the previous, %% but with the new value only. - dvvset:new(VersionVector,NewR) + dvvset:new(VersionVector,[NewR]) end, Object#r_object{contents=UpdatedContents, updatemetadata=dict:store(clean, true, dict:new()), @@ -324,7 +324,7 @@ set_vclock(Object, Clock) -> Vs = dvvset:values(get_contents(apply_updates(Object))), %% set the contents to a new clock with the same causal information %% as the version vector, but with the new list of values. - Object#r_object{contents=dvvset:new2(Clock,Vs)}. + Object#r_object{contents=dvvset:new(Clock,Vs)}. %% @doc INTERNAL USE ONLY. Set the contents of riak_object %% to the Contents. 
Normal clients should use the
@@ -785,9 +785,9 @@ jsonify_round_trip_test() ->
     ?assertEqual(Links, dict:fetch(?MD_LINKS, get_metadata(O2))),
     ?assertEqual(lists:sort(Indexes), lists:sort(index_data(O2))),
     O3 = increment_vclock(O,"a"),
-    O3a = set_contents(O3, dvvset:new(dvvset:join(get_contents(O3)),V)),
+    O3a = set_contents(O3, dvvset:new(dvvset:join(get_contents(O3)),[V])),
     O4 = increment_vclock(O3a,"a"),
-    O4a = set_contents(O4, dvvset:new(dvvset:join(get_contents(O4)),V)),
+    O4a = set_contents(O4, dvvset:new(dvvset:join(get_contents(O4)),[V])),
     O5 = update_vclock(O4a,O3a,"b"),
     O3b = from_json(to_json(O3a)),
     O4b = from_json(to_json(O4a)),
@@ -818,8 +818,8 @@ check_most_recent({V1, T1, D1}, {V2, T2, D2}) ->
     C1 = (dvvset:last(fun riak_object:compare_content_dates/2, get_contents(O1))),
     C2 = (dvvset:last(fun riak_object:compare_content_dates/2, get_contents(O2))),

-    C3 = most_recent_content(dvvset:new2([C1, C2])),
-    C4 = most_recent_content(dvvset:new2([C2, C1])),
+    C3 = most_recent_content(dvvset:new([C1, C2])),
+    C4 = most_recent_content(dvvset:new([C2, C1])),

     ?assertEqual(C3, C4),

From a337eafbc34434e39570cd87ca03d038d19322cb Mon Sep 17 00:00:00 2001
From: ricardobcl
Date: Wed, 20 Feb 2013 04:27:46 +0000
Subject: [PATCH 23/25] Corrected riak_object:equal_vclock.

---
 src/riak_object.erl | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/riak_object.erl b/src/riak_object.erl
index 8290ab9571..77f88cd956 100644
--- a/src/riak_object.erl
+++ b/src/riak_object.erl
@@ -114,8 +114,8 @@ strict_descendant(#r_object{contents=C1},#r_object{contents=C2}) ->
 descendant(#r_object{contents=C1},#r_object{contents=C2}) ->
     dvvset:equal(C1,C2) orelse dvvset:less(C2,C1).

--spec equal_vclock(riak_object(), riak_object()) -> boolean().
-equal_vclock(#r_object{contents=C1},#r_object{contents=C2}) ->
+-spec equal_vclock(dvvset:vector(), dvvset:vector()) -> boolean().
+equal_vclock(C1, C2) ->
     dvvset:equal(C1,C2).

 -spec equal(riak_object(), riak_object()) -> boolean().
@@ -123,7 +123,7 @@ equal_vclock(C1, C2) ->
 equal(Obj1,Obj2) ->
     (Obj1#r_object.bucket =:= Obj2#r_object.bucket)
         andalso (Obj1#r_object.key =:= Obj2#r_object.key)
-        andalso equal_vclock(Obj1,Obj2)
+        andalso equal_vclock(get_vclock(Obj1), get_vclock(Obj2))
         andalso equal2(Obj1,Obj2).
 equal2(Obj1,Obj2) ->
     UM1 = lists:keysort(1, dict:to_list(Obj1#r_object.updatemetadata)),

From 457558e7f6bdd201852ea91233fa7f16442af646 Mon Sep 17 00:00:00 2001
From: ricardobcl
Date: Thu, 28 Feb 2013 17:35:25 +0000
Subject: [PATCH 24/25] Minor refactoring to riak_object.erl

---
 src/riak_object.erl | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/src/riak_object.erl b/src/riak_object.erl
index 77f88cd956..f529333819 100644
--- a/src/riak_object.erl
+++ b/src/riak_object.erl
@@ -83,7 +83,7 @@ new(B, K, V, C) when is_binary(B), is_binary(K), is_list(C) ->
 %% NOTE: Removed "is_tuple(MD)" guard to make Dialyzer happy. The previous clause
 %% has a guard for string(), so this clause is OK without the guard.
 new(B, K, V, MD) when is_binary(B), is_binary(K) ->
-    case size(K) > ?MAX_KEY_SIZE of
+    case byte_size(K) > ?MAX_KEY_SIZE of
         true ->
             throw({error,key_too_large});
         false ->
@@ -212,27 +212,27 @@ compare_content_dates(C1,C2) ->
 % @doc Promote pending updates (made with the update_value() and
 % update_metadata() calls) to this riak_object.
 -spec apply_updates(riak_object()) -> riak_object().
-apply_updates(Object=#r_object{}) -> +apply_updates(Object = #r_object{updatemetadata = Updatemetadata, + updatevalue = VL}) -> CurrentContents = get_contents(Object), UpdatedContents = - case Object#r_object.updatevalue of + case VL of undefined -> - case dict:find(clean, Object#r_object.updatemetadata) of + case dict:find(clean, Updatemetadata) of {ok,_} -> CurrentContents; %% no changes in values or metadata error -> - NewMD = dict:erase(clean,Object#r_object.updatemetadata), + NewMD = dict:erase(clean, Updatemetadata), dvvset:map( fun (R) -> #r_content{metadata=NewMD, value=R#r_content.value} end, CurrentContents) end; _ -> - MD = case dict:find(clean, Object#r_object.updatemetadata) of + MD = case dict:find(clean, Updatemetadata) of {ok,_} -> hd(get_metadatas(Object)); error -> - dict:erase(clean,Object#r_object.updatemetadata) + dict:erase(clean, Updatemetadata) end, - VL = Object#r_object.updatevalue, NewR = #r_content{metadata=MD,value=VL}, %% extract the causal information VersionVector = dvvset:join(CurrentContents), From 407d19ce798bd3088925ceb5c7d7bc07233c5b60 Mon Sep 17 00:00:00 2001 From: ricardobcl Date: Wed, 5 Jun 2013 16:51:53 +0100 Subject: [PATCH 25/25] removed some debug prints --- src/riak_kv_vnode.erl | 4 ---- src/riak_object.erl | 2 -- 2 files changed, 6 deletions(-) diff --git a/src/riak_kv_vnode.erl b/src/riak_kv_vnode.erl index 2edfb0bfbf..e2b096ea7c 100644 --- a/src/riak_kv_vnode.erl +++ b/src/riak_kv_vnode.erl @@ -814,9 +814,6 @@ prepare_put(#state{vnodeid=VId, {{true, ObjToStore}, PutArgs#putargs{index_specs=IndexSpecs, is_index=IndexBackend}}; {ok, Val, _UpdModState} -> OldObj = binary_to_term(Val), - - io:format("~nO2:~p~n",[OldObj]), - io:format("~nO3:~p~n",[RObj]), case put_merge(Coord, LWW, OldObj, RObj, VId, StartTime) of {oldobj, OldObj1} -> {{false, OldObj1}, PutArgs}; @@ -1407,7 +1404,6 @@ backend_with_known_key(BackendMod) -> B = <<"f">>, K = <<"b">>, O = riak_object:new(B, K, <<"z">>), - io:format("~nO1:~p~n",[O]), {noreply, S2} = handle_command(?KV_PUT_REQ{bkey={B,K}, object=O, req_id=123, diff --git a/src/riak_object.erl b/src/riak_object.erl index f529333819..ac5f8cf427 100644 --- a/src/riak_object.erl +++ b/src/riak_object.erl @@ -792,8 +792,6 @@ jsonify_round_trip_test() -> O3b = from_json(to_json(O3a)), O4b = from_json(to_json(O4a)), O5b = from_json(to_json(O5)), - io:format("~nO1:~p~n",[O3a]), - io:format("~nO3:~p~n",[O3b]), ?assert(dvvset:equal(get_contents(O), get_contents(O2))), ?assert(dvvset:equal(get_contents(O3a), get_contents(O3b))), ?assert(dvvset:equal(get_contents(O4a), get_contents(O4b))),
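Taken together, the series swaps per-object version vectors for dotted version vector
sets end to end. A minimal sketch of the coordinator-side write this enables
(illustrative only, not code from the patches; VId is the coordinating vnode id):

    %% UpdObj arrives from the client, CurObj is the local replica's copy.
    %% Dotting the new value at VId makes it supersede exactly the context
    %% the client read, while truly concurrent values survive as siblings
    %% instead of being silently discarded.
    coordinate_put(UpdObj, CurObj, VId) ->
        riak_object:update_vclock(UpdObj, CurObj, VId).

On the non-coordinated (replica) path, put_merge/6 instead goes through
riak_object:syntactic_merge/2, whose dvvset:sync keeps concurrent siblings and drops
causally dominated ones.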