profile
viewpoint

seth/ej 175

Helper module for working with Erlang terms representing JSON

chef-cookbooks/logrotate 122

Development repository for the Chef Cookbook for logrotate

nathwill/chef-systemd 40

resource-driven chef cookbook for managing linux systems via systemd

stevendanna/cookbook-r 22

Chef Cookbook for R

chef-boneyard/delivery-cluster 20

DEPRECATED: Deployment cookbook for standing up Delivery clusters using chef-provisioning.

stevendanna/cookbook-xinetd 5

Chef cookbook for xinetd

paulmooring/knife-stalenodes 4

Knife plugin for listing stale nodes

stevendanna/basebox 2

Clean & Uniform Vagrant Basebox Definitions

stevendanna/chef 1

A systems integration framework, built to bring the benefits of configuration management to your entire infrastructure.

stevendanna/chef-sugar 1

Chef Sugar is a Gem & Chef Recipe that includes a series of helpful sugar of the Chef core and other resources to make a cleaner, more lean recipe DSL, enforce DRY principles, and make writing Chef recipes an awesome experience!

Pull request review comment — chef/mini_s3

Replace AWS sigv2 with sigv4

 manual_start() ->     application:start(ssl),     application:start(inets). --spec new(string(), string()) -> config().--new(AccessKeyID, SecretAccessKey) ->-    #config{-     access_key_id=AccessKeyID,-     secret_access_key=SecretAccessKey}.---spec new(string(), string(), string()) -> config().--new(AccessKeyID, SecretAccessKey, Host) ->-    #config{-     access_key_id=AccessKeyID,-     secret_access_key=SecretAccessKey,-     s3_url=Host}.---spec new(string(), string(), string(), bucket_access_type()) -> config().+-spec parse_ipv_host_domain(string()) -> {pos_integer(), list(), string()}.+parse_ipv_host_domain(Host0) ->+    case string:lexemes(Host0, "[]") of+        % ipv4+        [Host0] ->+            Domain0 = "",+            {4, Host0, Domain0};+        % ipv6+        [Scheme0,    Domain0, Port0] -> {6, lists:flatten([Scheme0,    $\r, Port0]), Domain0};+        ["http://",  Domain0       ] -> {6, lists:flatten(["http://",  $\r       ]), Domain0};+        ["https://", Domain0       ] -> {6, lists:flatten(["https://", $\r       ]), Domain0};+        [            Domain0, Port0] -> {6, lists:flatten([            $\r, Port0]), Domain0};+        [            Domain0       ] -> {6,               [            $\r       ] , Domain0}

Ah, missed that was already a bound variable

lbakerchef

comment created time in 9 hours

PullRequestReviewEvent
PullRequestReviewEvent

pull request comment — chef/chef-server

fix max elasticsearch memory

@tehlers320 This looks good. When you say reconfigure fails, do you mean you were hitting the preflight validator here:

https://github.com/chef/chef-server/blob/master/omnibus/files/private-chef-cookbooks/private-chef/libraries/preflight_indexing_validator.rb#L88

or were you hitting another error?

tehlers320

comment created time in 2 days

Pull request review comment — chef/chef-server

Lbaker/sigv4 bookshelf

  -module(bksw_sec). --export([is_authorized/2]).+-export([encode_access_denied_error_response/3]).+-export([is_authorized/2                      ]).+-export([parse_authorization/1                ]). --define(SECONDS_AT_EPOCH, 62167219200).+-ifdef(TEST).+-compile([export_all, nowarn_export_all       ]).+-endif.++% is this necessary?  try removing. -include("internal.hrl"). +-include_lib("erlcloud/include/erlcloud_aws.hrl").++% CODE REVIEW - choose accessor style: 1) longer style (this one) or 2) shorter style (below).+% alternatively, these could be functions.+%+% until erlang gets a shorthand way to access a key's value+%-define(ACCESSKEY(Auth),          maps:get(accesskey,          Auth)).+%-define(CONFIG(Auth),             maps:get(config,             Auth)).+%-define(ALT_SIGNED_HEADERS(Auth), maps:get(alt_signed_headers, Auth)).+%-define(METHOD(Auth),             maps:get(method,             Auth)).+%-define(PATH(Auth),               maps:get(path,               Auth)).+%-define(REQ(Auth),                maps:get(req,                Auth)).+%-define(REQID(Auth),              maps:get(reqid,              Auth)).++% CODE REVIEW - choose accessor style: 1) longer style (above) or 2) shorter style (this one).+%+% until erlang gets a shorthand way to access a key's value+-define(ACCESSKEY,          maps:get(accesskey,          Auth)).+-define(CONFIG,             maps:get(config,             Auth)).+-define(ALT_SIGNED_HEADERS, maps:get(alt_signed_headers, Auth)).+-define(METHOD,             maps:get(method,             Auth)).+-define(PATH,               maps:get(path,               Auth)).+-define(REQ,                maps:get(req,                Auth)).+-define(REQID,              maps:get(reqid,              Auth)).+ %%=================================================================== %% API functions %%===================================================================-is_authorized(Req0, #context{auth_check_disabled=true} = Context) ->-    {true, Req0, 
Context};-is_authorized(Req0, #context{} = Context) ->-    Headers = mochiweb_headers:to_list(wrq:req_headers(Req0)),-    {RequestId, Req1} = bksw_req:with_amz_request_id(Req0),-    case proplists:get_value('Authorization', Headers, undefined) of-        undefined ->-            do_signed_url_authorization(RequestId, Req1, Context);-        IncomingAuth ->-            do_standard_authorization(RequestId, IncomingAuth, Req1, Context)++is_authorized(Req0, #context{auth_check_disabled = true          } = Context) -> {true, Req0, Context};+is_authorized(Req0, #context{auth_type           = presigned_url,+                             date                = Date,+                             incoming_sig        = IncomingSignature,+                             signed_headers      = SignedHeaders,+                             x_amz_expires_int   = XAmzExpiresInt} = Context) ->+    Auth               = auth_init(Req0, Context, SignedHeaders),+    {Bucketname, Key } = get_bucket_key(?PATH),+    ComparisonURL      = mini_s3:s3_url(?METHOD, Bucketname, Key, XAmzExpiresInt, SignedHeaders, Date, ?CONFIG),+    IncomingSig        = list_to_binary(IncomingSignature),+    [_, ComparisonSig] = string:split(ComparisonURL, "&X-Amz-Signature=", trailing),++    % TODO: try to remove alt sig computation and see what happens+    % NOTE: this was tried, and caused compilation and test failures.++    CalculatedSig =+        case IncomingSig of+            ComparisonSig ->+                %AltComparisonSig = "not computed",+                IncomingSig;+            _ ->+                AltComparisonURL      = mini_s3:s3_url(?METHOD, Bucketname, Key, XAmzExpiresInt, ?ALT_SIGNED_HEADERS, Date, ?CONFIG),+                [_, AltComparisonSig] = string:split(AltComparisonURL, "&X-Amz-Signature=", all),

Why do we use all here but trailing at line 74?

lbakerchef

comment created time in 3 days

Pull request review comment — chef/chef-server

Lbaker/sigv4 bookshelf

  -module(bksw_sec). --export([is_authorized/2]).+-export([encode_access_denied_error_response/3]).+-export([is_authorized/2                      ]).+-export([parse_authorization/1                ]). --define(SECONDS_AT_EPOCH, 62167219200).+-ifdef(TEST).+-compile([export_all, nowarn_export_all       ]).+-endif.++% is this necessary?  try removing. -include("internal.hrl"). +-include_lib("erlcloud/include/erlcloud_aws.hrl").++% CODE REVIEW - choose accessor style: 1) longer style (this one) or 2) shorter style (below).+% alternatively, these could be functions.+%+% until erlang gets a shorthand way to access a key's value+%-define(ACCESSKEY(Auth),          maps:get(accesskey,          Auth)).+%-define(CONFIG(Auth),             maps:get(config,             Auth)).+%-define(ALT_SIGNED_HEADERS(Auth), maps:get(alt_signed_headers, Auth)).+%-define(METHOD(Auth),             maps:get(method,             Auth)).+%-define(PATH(Auth),               maps:get(path,               Auth)).+%-define(REQ(Auth),                maps:get(req,                Auth)).+%-define(REQID(Auth),              maps:get(reqid,              Auth)).++% CODE REVIEW - choose accessor style: 1) longer style (above) or 2) shorter style (this one).+%+% until erlang gets a shorthand way to access a key's value+-define(ACCESSKEY,          maps:get(accesskey,          Auth)).+-define(CONFIG,             maps:get(config,             Auth)).+-define(ALT_SIGNED_HEADERS, maps:get(alt_signed_headers, Auth)).+-define(METHOD,             maps:get(method,             Auth)).+-define(PATH,               maps:get(path,               Auth)).+-define(REQ,                maps:get(req,                Auth)).+-define(REQID,              maps:get(reqid,              Auth)).+ %%=================================================================== %% API functions %%===================================================================-is_authorized(Req0, #context{auth_check_disabled=true} = Context) ->-    {true, Req0, 
Context};-is_authorized(Req0, #context{} = Context) ->-    Headers = mochiweb_headers:to_list(wrq:req_headers(Req0)),-    {RequestId, Req1} = bksw_req:with_amz_request_id(Req0),-    case proplists:get_value('Authorization', Headers, undefined) of-        undefined ->-            do_signed_url_authorization(RequestId, Req1, Context);-        IncomingAuth ->-            do_standard_authorization(RequestId, IncomingAuth, Req1, Context)++is_authorized(Req0, #context{auth_check_disabled = true          } = Context) -> {true, Req0, Context};+is_authorized(Req0, #context{auth_type           = presigned_url,+                             date                = Date,+                             incoming_sig        = IncomingSignature,+                             signed_headers      = SignedHeaders,+                             x_amz_expires_int   = XAmzExpiresInt} = Context) ->+    Auth               = auth_init(Req0, Context, SignedHeaders),+    {Bucketname, Key } = get_bucket_key(?PATH),+    ComparisonURL      = mini_s3:s3_url(?METHOD, Bucketname, Key, XAmzExpiresInt, SignedHeaders, Date, ?CONFIG),+    IncomingSig        = list_to_binary(IncomingSignature),+    [_, ComparisonSig] = string:split(ComparisonURL, "&X-Amz-Signature=", trailing),++    % TODO: try to remove alt sig computation and see what happens+    % NOTE: this was tried, and caused compilation and test failures.++    CalculatedSig =+        case IncomingSig of+            ComparisonSig ->+                %AltComparisonSig = "not computed",+                IncomingSig;+            _ ->+                AltComparisonURL      = mini_s3:s3_url(?METHOD, Bucketname, Key, XAmzExpiresInt, ?ALT_SIGNED_HEADERS, Date, ?CONFIG),+                [_, AltComparisonSig] = string:split(AltComparisonURL, "&X-Amz-Signature=", all),+                AltComparisonSig+        end,+    auth_finish(Auth, Context, ComparisonURL, IncomingSig, CalculatedSig);+is_authorized(Req0, #context{auth_type           = auth_header,+               
              date                = Date,+                             incoming_sig        = IncomingSignature,+                             region              = Region,+                             signed_headers      = SignedHeaders} = Context) ->+    Auth              = auth_init(Req0, Context, SignedHeaders),+    ComparisonURL     = "not-applicable",+    QueryParams       = wrq:req_qs(?REQ),+    SigV4Headers      = erlcloud_aws:sign_v4(?METHOD, ?PATH, ?CONFIG, SignedHeaders, <<>>, Region, "s3", QueryParams, Date),+    IncomingSig       = IncomingSignature,+    ComparisonSig     = parseauth_or_throw(proplists:get_value("Authorization", SigV4Headers, ""), {?REQID, ?REQ, Context}),++    % TODO: try to remove alt sig computation and see what happens+    % NOTE: this was tried, and caused compilation and test failures.++    CalculatedSig =+        case IncomingSig of+            ComparisonSig ->+                %AltComparisonSig = "not computed",+                IncomingSig;+            _ ->+                AltSigV4Headers   = erlcloud_aws:sign_v4(?METHOD, ?PATH, ?CONFIG, ?ALT_SIGNED_HEADERS, <<>>, Region, "s3", QueryParams, Date),+                _AltComparisonSig = parseauth_or_throw(proplists:get_value("Authorization", AltSigV4Headers, ""), {?REQID, ?REQ, Context})+        end,+    auth_finish(Auth, Context, ComparisonURL, IncomingSig, CalculatedSig).++% @doc split authorization header into component parts+% https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html+-spec parse_authorization(string()) -> {ok, [string()]} | {error, parse_authorization}.+parse_authorization(Authorization) ->+    case string:split(Authorization, " ", all) of+        ["AWS4-HMAC-SHA256", "Credential="++Cred, "SignedHeaders="++SigHead, "Signature="++Signature] ->+            {ok, [string:trim(Cred, trailing, ","), string:trim(SigHead, trailing, ","), Signature]};+        _ ->+            {error, parse_authorization}     end. 
-do_signed_url_authorization(RequestId, Req0, #context{reqid = ReqId} = Context) ->-    AWSAccessKeyId = wrq:get_qs_value("AWSAccessKeyId", Req0),-    Expires = wrq:get_qs_value("Expires", Req0),-    IncomingSignature = wrq:get_qs_value("Signature", Req0),-    RawMethod = wrq:method(Req0),-    Method = string:to_lower(erlang:atom_to_list(RawMethod)),-    Headers = mochiweb_headers:to_list(wrq:req_headers(Req0)),-    Path  = wrq:path(Req0),-    AccessKey = bksw_conf:access_key_id(Context),-    SecretKey = bksw_conf:secret_access_key(Context),-    ExpireDiff = expire_diff(Expires),-    case make_signed_url_authorization(SecretKey,-                                       Method,-                                       Path,-                                       Expires,-                                       Headers) of-        {StringToSign, Signature} ->-            case ExpireDiff =< 0 of+encode_access_denied_error_response(RequestId, Req0, Context) ->+    Req1 = bksw_req:with_amz_id_2(Req0),+    Body = bksw_xml:access_denied_error(RequestId),+    Req2 = wrq:set_resp_body(Body, Req1),+    {{halt, 403}, Req2, Context}.++%%===================================================================+% local functions, helpers, etc.+%%===================================================================++% common setup, init+-spec auth_init(any(), tuple(), string()) -> map().+auth_init(Req0, Context, SignedHeaders) ->+    AccessKey            =  bksw_conf:access_key_id(Context),+    {RequestId, Req1}    =  bksw_req:with_amz_request_id(Req0),+    Config               =  mini_s3:new(AccessKey, bksw_conf:secret_access_key(Context), host(Req1)),+    #{accesskey          => AccessKey,+      config             => Config,+      alt_signed_headers => [case {K, V} of {"host", _} -> {"host", get_host_toggleport(host(Req1), Config)}; _ -> {K, V} end || {K, V} <- SignedHeaders],+      method             => list_to_atom(string:to_lower(erlang:atom_to_list(wrq:method(Req1)))),+      path        
       => wrq:path(Req1),+      req                => Req1,+      reqid              => RequestId}.++% TODO: spec+auth_finish(Auth, #context{+                     aws_access_key_id = AWSAccessKeyId,+                     date              = Date,+                     reqid             = ReqId,+                     x_amz_expires_int = XAmzExpiresInt,+                     x_amz_expires_str = XAmzExpiresString+                  } = Context, ComparisonURL, IncomingSig, CalculatedSig) ->+    case IncomingSig of+        CalculatedSig ->+            case is_expired(Date, XAmzExpiresInt) of                 true ->-                    ?LOG_DEBUG("req_id=~p expired signature (~p) for ~p",-                               [ReqId, Expires, Path]),-                    encode_access_denied_error_response(RequestId, Req0, Context);+                    ?LOG_DEBUG("req_id=~p expired signature (~p) for ~p", [ReqId, XAmzExpiresInt, ?PATH]),+                    encode_access_denied_error_response(?REQID, ?REQ, Context);                 false ->-                    case ((erlang:iolist_to_binary(AWSAccessKeyId) ==-                               erlang:iolist_to_binary(AccessKey)) andalso-                          erlang:iolist_to_binary(Signature) ==-                              erlang:iolist_to_binary(IncomingSignature)) of+                    case erlang:iolist_to_binary(AWSAccessKeyId) == erlang:iolist_to_binary(?ACCESSKEY) of                         true ->-                            MaxAge = "max-age=" ++ integer_to_list(ExpireDiff),-                            Req1 = wrq:set_resp_header("Cache-Control", MaxAge, Req0),-                            {true, Req1, Context};+                            MaxAge = "max-age=" ++ XAmzExpiresString,+                            Req2   = wrq:set_resp_header("Cache-Control", MaxAge, ?REQ),+                            {true, Req2, Context};                         false ->-                            ?LOG_DEBUG("req_id=~p signing error for ~p", 
[ReqId, Path]),-                            encode_sign_error_response(AWSAccessKeyId, IncomingSignature, RequestId,-                                                       StringToSign, Req0, Context)+                            ?LOG_DEBUG("req_id=~p signing error for ~p", [ReqId, ?PATH]),+                            encode_sign_error_response(AWSAccessKeyId, IncomingSig, ?REQID,+                                                       ComparisonURL, ?REQ, Context)

I wonder if we could change the error message in this encode_sign_error_response a bit such that we return useful information in the case where we don't have a ComparisonURL.

lbakerchef

comment created time in 3 days

Pull request review comment — chef/chef-server

Lbaker/sigv4 bookshelf

  -module(bksw_sec). --export([is_authorized/2]).+-export([encode_access_denied_error_response/3]).+-export([is_authorized/2                      ]).+-export([parse_authorization/1                ]). --define(SECONDS_AT_EPOCH, 62167219200).+-ifdef(TEST).+-compile([export_all, nowarn_export_all       ]).+-endif.++% is this necessary?  try removing. -include("internal.hrl"). +-include_lib("erlcloud/include/erlcloud_aws.hrl").++% CODE REVIEW - choose accessor style: 1) longer style (this one) or 2) shorter style (below).+% alternatively, these could be functions.+%+% until erlang gets a shorthand way to access a key's value+%-define(ACCESSKEY(Auth),          maps:get(accesskey,          Auth)).+%-define(CONFIG(Auth),             maps:get(config,             Auth)).+%-define(ALT_SIGNED_HEADERS(Auth), maps:get(alt_signed_headers, Auth)).+%-define(METHOD(Auth),             maps:get(method,             Auth)).+%-define(PATH(Auth),               maps:get(path,               Auth)).+%-define(REQ(Auth),                maps:get(req,                Auth)).+%-define(REQID(Auth),              maps:get(reqid,              Auth)).++% CODE REVIEW - choose accessor style: 1) longer style (above) or 2) shorter style (this one).+%+% until erlang gets a shorthand way to access a key's value+-define(ACCESSKEY,          maps:get(accesskey,          Auth)).+-define(CONFIG,             maps:get(config,             Auth)).+-define(ALT_SIGNED_HEADERS, maps:get(alt_signed_headers, Auth)).+-define(METHOD,             maps:get(method,             Auth)).+-define(PATH,               maps:get(path,               Auth)).+-define(REQ,                maps:get(req,                Auth)).+-define(REQID,              maps:get(reqid,              Auth)).+ %%=================================================================== %% API functions %%===================================================================-is_authorized(Req0, #context{auth_check_disabled=true} = Context) ->-    {true, Req0, 
Context};-is_authorized(Req0, #context{} = Context) ->-    Headers = mochiweb_headers:to_list(wrq:req_headers(Req0)),-    {RequestId, Req1} = bksw_req:with_amz_request_id(Req0),-    case proplists:get_value('Authorization', Headers, undefined) of-        undefined ->-            do_signed_url_authorization(RequestId, Req1, Context);-        IncomingAuth ->-            do_standard_authorization(RequestId, IncomingAuth, Req1, Context)++is_authorized(Req0, #context{auth_check_disabled = true          } = Context) -> {true, Req0, Context};+is_authorized(Req0, #context{auth_type           = presigned_url,+                             date                = Date,+                             incoming_sig        = IncomingSignature,+                             signed_headers      = SignedHeaders,+                             x_amz_expires_int   = XAmzExpiresInt} = Context) ->+    Auth               = auth_init(Req0, Context, SignedHeaders),+    {Bucketname, Key } = get_bucket_key(?PATH),+    ComparisonURL      = mini_s3:s3_url(?METHOD, Bucketname, Key, XAmzExpiresInt, SignedHeaders, Date, ?CONFIG),+    IncomingSig        = list_to_binary(IncomingSignature),+    [_, ComparisonSig] = string:split(ComparisonURL, "&X-Amz-Signature=", trailing),++    % TODO: try to remove alt sig computation and see what happens+    % NOTE: this was tried, and caused compilation and test failures.++    CalculatedSig =+        case IncomingSig of+            ComparisonSig ->+                %AltComparisonSig = "not computed",+                IncomingSig;+            _ ->+                AltComparisonURL      = mini_s3:s3_url(?METHOD, Bucketname, Key, XAmzExpiresInt, ?ALT_SIGNED_HEADERS, Date, ?CONFIG),+                [_, AltComparisonSig] = string:split(AltComparisonURL, "&X-Amz-Signature=", all),+                AltComparisonSig+        end,+    auth_finish(Auth, Context, ComparisonURL, IncomingSig, CalculatedSig);+is_authorized(Req0, #context{auth_type           = auth_header,+               
              date                = Date,+                             incoming_sig        = IncomingSignature,+                             region              = Region,+                             signed_headers      = SignedHeaders} = Context) ->+    Auth              = auth_init(Req0, Context, SignedHeaders),+    ComparisonURL     = "not-applicable",+    QueryParams       = wrq:req_qs(?REQ),+    SigV4Headers      = erlcloud_aws:sign_v4(?METHOD, ?PATH, ?CONFIG, SignedHeaders, <<>>, Region, "s3", QueryParams, Date),+    IncomingSig       = IncomingSignature,+    ComparisonSig     = parseauth_or_throw(proplists:get_value("Authorization", SigV4Headers, ""), {?REQID, ?REQ, Context}),++    % TODO: try to remove alt sig computation and see what happens+    % NOTE: this was tried, and caused compilation and test failures.++    CalculatedSig =+        case IncomingSig of+            ComparisonSig ->+                %AltComparisonSig = "not computed",+                IncomingSig;+            _ ->+                AltSigV4Headers   = erlcloud_aws:sign_v4(?METHOD, ?PATH, ?CONFIG, ?ALT_SIGNED_HEADERS, <<>>, Region, "s3", QueryParams, Date),+                _AltComparisonSig = parseauth_or_throw(proplists:get_value("Authorization", AltSigV4Headers, ""), {?REQID, ?REQ, Context})+        end,+    auth_finish(Auth, Context, ComparisonURL, IncomingSig, CalculatedSig).++% @doc split authorization header into component parts+% https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html+-spec parse_authorization(string()) -> {ok, [string()]} | {error, parse_authorization}.+parse_authorization(Authorization) ->+    case string:split(Authorization, " ", all) of+        ["AWS4-HMAC-SHA256", "Credential="++Cred, "SignedHeaders="++SigHead, "Signature="++Signature] ->+            {ok, [string:trim(Cred, trailing, ","), string:trim(SigHead, trailing, ","), Signature]};+        _ ->+            {error, parse_authorization}     end. 
-do_signed_url_authorization(RequestId, Req0, #context{reqid = ReqId} = Context) ->-    AWSAccessKeyId = wrq:get_qs_value("AWSAccessKeyId", Req0),-    Expires = wrq:get_qs_value("Expires", Req0),-    IncomingSignature = wrq:get_qs_value("Signature", Req0),-    RawMethod = wrq:method(Req0),-    Method = string:to_lower(erlang:atom_to_list(RawMethod)),-    Headers = mochiweb_headers:to_list(wrq:req_headers(Req0)),-    Path  = wrq:path(Req0),-    AccessKey = bksw_conf:access_key_id(Context),-    SecretKey = bksw_conf:secret_access_key(Context),-    ExpireDiff = expire_diff(Expires),-    case make_signed_url_authorization(SecretKey,-                                       Method,-                                       Path,-                                       Expires,-                                       Headers) of-        {StringToSign, Signature} ->-            case ExpireDiff =< 0 of+encode_access_denied_error_response(RequestId, Req0, Context) ->+    Req1 = bksw_req:with_amz_id_2(Req0),+    Body = bksw_xml:access_denied_error(RequestId),+    Req2 = wrq:set_resp_body(Body, Req1),+    {{halt, 403}, Req2, Context}.++%%===================================================================+% local functions, helpers, etc.+%%===================================================================++% common setup, init+-spec auth_init(any(), tuple(), string()) -> map().+auth_init(Req0, Context, SignedHeaders) ->+    AccessKey            =  bksw_conf:access_key_id(Context),+    {RequestId, Req1}    =  bksw_req:with_amz_request_id(Req0),+    Config               =  mini_s3:new(AccessKey, bksw_conf:secret_access_key(Context), host(Req1)),+    #{accesskey          => AccessKey,+      config             => Config,+      alt_signed_headers => [case {K, V} of {"host", _} -> {"host", get_host_toggleport(host(Req1), Config)}; _ -> {K, V} end || {K, V} <- SignedHeaders],+      method             => list_to_atom(string:to_lower(erlang:atom_to_list(wrq:method(Req1)))),+      path        
       => wrq:path(Req1),+      req                => Req1,+      reqid              => RequestId}.++% TODO: spec+auth_finish(Auth, #context{+                     aws_access_key_id = AWSAccessKeyId,+                     date              = Date,+                     reqid             = ReqId,+                     x_amz_expires_int = XAmzExpiresInt,+                     x_amz_expires_str = XAmzExpiresString+                  } = Context, ComparisonURL, IncomingSig, CalculatedSig) ->+    case IncomingSig of+        CalculatedSig ->+            case is_expired(Date, XAmzExpiresInt) of                 true ->-                    ?LOG_DEBUG("req_id=~p expired signature (~p) for ~p",-                               [ReqId, Expires, Path]),-                    encode_access_denied_error_response(RequestId, Req0, Context);+                    ?LOG_DEBUG("req_id=~p expired signature (~p) for ~p", [ReqId, XAmzExpiresInt, ?PATH]),+                    encode_access_denied_error_response(?REQID, ?REQ, Context);                 false ->-                    case ((erlang:iolist_to_binary(AWSAccessKeyId) ==-                               erlang:iolist_to_binary(AccessKey)) andalso-                          erlang:iolist_to_binary(Signature) ==-                              erlang:iolist_to_binary(IncomingSignature)) of+                    case erlang:iolist_to_binary(AWSAccessKeyId) == erlang:iolist_to_binary(?ACCESSKEY) of                         true ->-                            MaxAge = "max-age=" ++ integer_to_list(ExpireDiff),-                            Req1 = wrq:set_resp_header("Cache-Control", MaxAge, Req0),-                            {true, Req1, Context};+                            MaxAge = "max-age=" ++ XAmzExpiresString,+                            Req2   = wrq:set_resp_header("Cache-Control", MaxAge, ?REQ),+                            {true, Req2, Context};                         false ->-                            ?LOG_DEBUG("req_id=~p signing error for ~p", 
[ReqId, Path]),-                            encode_sign_error_response(AWSAccessKeyId, IncomingSignature, RequestId,-                                                       StringToSign, Req0, Context)+                            ?LOG_DEBUG("req_id=~p signing error for ~p", [ReqId, ?PATH]),+                            encode_sign_error_response(AWSAccessKeyId, IncomingSig, ?REQID,+                                                       ComparisonURL, ?REQ, Context)

It's hard to see in this diff, but I think we need to return this encode_sign_error_response in one more case which is where the IncomingSig != CalculatedSig

lbakerchef

comment created time in 3 days

PullRequestReviewEvent
PullRequestReviewEvent

issue comment — chef/chef

Merge knife-opc into core Chef

@snehaldwivedi Apologies but I don't think I am going to remember the full details of a comment I made 5 years ago. However, looking at the opc_user_delete code, we have a large amount of code that gets all of the organizations for the user and then dissociates the user from each of those organizations before deleting them:

https://github.com/chef/knife-opc/blob/master/lib/chef/knife/opc_user_delete.rb#L50-L69

which was explicitly added by support:

https://github.com/chef/knife-opc/pull/6

from Slack it appears this was added as a result of a support ticket. That support ticket is in an older support ticketing system, but I have a partial email record of the ticket. As part of that ticket, the user was concerned with dangling references to a now deleted user:

a concern was raised here that removing the users without disassociating them might leave dangling references and cause problems further on; Apparently we have run into this issue before. Just wanted to double check with you before doing anything.

My strong suspicion is that it is possible (likely even) that the issue being referred to was in an ancient version of Chef Server that ran against a completely different database (CouchDB) than the current server. In the current server, deleting a user should cause a cascading deleting into the org_user_associations table.

However, all that said, this code has been working well for people without issue and it has subsequently gained various client-side validation features to prevent users from making a few common mistakes, so I think you can safely ignore the comment for now and preserve the current feature.

stevendanna

comment created time in 3 days

Pull request review commentchef/mini_s3

Replace AWS sigv2 with sigv4

 manual_start() ->     application:start(ssl),     application:start(inets). --spec new(string(), string()) -> config().--new(AccessKeyID, SecretAccessKey) ->-    #config{-     access_key_id=AccessKeyID,-     secret_access_key=SecretAccessKey}.---spec new(string(), string(), string()) -> config().--new(AccessKeyID, SecretAccessKey, Host) ->-    #config{-     access_key_id=AccessKeyID,-     secret_access_key=SecretAccessKey,-     s3_url=Host}.---spec new(string(), string(), string(), bucket_access_type()) -> config().+-spec parse_ipv_host_domain(string()) -> {pos_integer(), list(), string()}.+parse_ipv_host_domain(Host0) ->+    case string:lexemes(Host0, "[]") of+        % ipv4+        [Host0] ->+            Domain0 = "",+            {4, Host0, Domain0};+        % ipv6+        [Scheme0,    Domain0, Port0] -> {6, lists:flatten([Scheme0,    $\r, Port0]), Domain0};+        ["http://",  Domain0       ] -> {6, lists:flatten(["http://",  $\r       ]), Domain0};+        ["https://", Domain0       ] -> {6, lists:flatten(["https://", $\r       ]), Domain0};+        [            Domain0, Port0] -> {6, lists:flatten([            $\r, Port0]), Domain0};+        [            Domain0       ] -> {6,               [            $\r       ] , Domain0}+    end. +-spec new(string() | binary(), string() | binary(), string()) -> aws_config().+new(AccessKeyID, SecretAccessKey, Host0) ->+    % chef-server crams scheme://host:port all into into Host; erlcloud wants them separate.+    % Assume:+    %   Host   == scheme://domain:port | scheme://domain | domain:port | domain+    %   scheme == http | https++    % ipv4/6 detection+    {Ipv, Host, Domain0} = parse_ipv_host_domain(Host0),

Have we looked into using this: http://erlang.org/doc//man/uri_string.html I wonder if it might get us most of what we want? But, you've written enough tests that this isn't my biggest concern, so no worries if not.

lbakerchef

comment created time in 4 days

Pull request review commentchef/mini_s3

Replace AWS sigv2 with sigv4

 %% -*- tab-width: 4;erlang-indent-level: 4;indent-tabs-mode: nil -*- %% ex: ts=4 sw=4 ft=erlang et {deps, [-        {ibrowse, ".*",-         %% Pin here, becase 555f707 (pr #155) introduces an ipv6 bug we've not fixed-         {git, "https://github.com/cmullaparthi/ibrowse.git", {ref, "c97136cfb61fcc6f39d4e7da47372a64f7fca04e"}}},-        {envy, {git, "https://github.com/markan/envy.git", {branch, "master"}}}+        %% Pin here, becase 555f707 (pr #155) introduces an ipv6 bug we've not fixed

I think this comment was related to ibrowse, so it probably can go as well.

lbakerchef

comment created time in 4 days

Pull request review commentchef/mini_s3

Replace AWS sigv2 with sigv4

 manual_start() ->     application:start(ssl),     application:start(inets). --spec new(string(), string()) -> config().--new(AccessKeyID, SecretAccessKey) ->-    #config{-     access_key_id=AccessKeyID,-     secret_access_key=SecretAccessKey}.---spec new(string(), string(), string()) -> config().--new(AccessKeyID, SecretAccessKey, Host) ->-    #config{-     access_key_id=AccessKeyID,-     secret_access_key=SecretAccessKey,-     s3_url=Host}.---spec new(string(), string(), string(), bucket_access_type()) -> config().+-spec parse_ipv_host_domain(string()) -> {pos_integer(), list(), string()}.+parse_ipv_host_domain(Host0) ->+    case string:lexemes(Host0, "[]") of+        % ipv4+        [Host0] ->+            Domain0 = "",+            {4, Host0, Domain0};+        % ipv6+        [Scheme0,    Domain0, Port0] -> {6, lists:flatten([Scheme0,    $\r, Port0]), Domain0};+        ["http://",  Domain0       ] -> {6, lists:flatten(["http://",  $\r       ]), Domain0};+        ["https://", Domain0       ] -> {6, lists:flatten(["https://", $\r       ]), Domain0};+        [            Domain0, Port0] -> {6, lists:flatten([            $\r, Port0]), Domain0};+        [            Domain0       ] -> {6,               [            $\r       ] , Domain0}

Can we actually hit this case? It seems like it would be covered by the first case.

lbakerchef

comment created time in 4 days

PullRequestReviewEvent
PullRequestReviewEvent

Pull request review commentchef/erlcloud

Presigned URLs

 make_link(Expire_time, BucketName, Key, Config) ->  -spec get_object_url(string(), string()) -> string(). - get_object_url(BucketName, Key) ->+get_object_url(BucketName, Key) ->   get_object_url(BucketName, Key, default_config()).  -spec get_object_url(string(), string(), aws_config()) -> string(). - get_object_url(BucketName, Key, Config) ->+get_object_url(BucketName, Key, Config) ->   case Config#aws_config.s3_bucket_after_host of       false -> lists:flatten([Config#aws_config.s3_scheme, BucketName, ".", Config#aws_config.s3_host, port_spec(Config), "/", Key]);       true  -> lists:flatten([Config#aws_config.s3_scheme, Config#aws_config.s3_host, port_spec(Config), "/", BucketName, "/", Key])   end. +-spec get_object_url_elements(string(), string(), aws_config()) -> {Host::string(), Path::string(), URL::string()}.+get_object_url_elements(BucketName, Key, Config) ->+    Key0 = case lists:prefix("/", Key) of+               true -> Key;+               false -> "/" ++ Key+           end,++    case Config#aws_config.s3_bucket_after_host of+        false ->+            Host = BucketName ++ "." 
++ Config#aws_config.s3_host,+            {Host, Key0, lists:flatten([Config#aws_config.s3_scheme, Host, port_spec(Config), Key0])};+        true  ->+            Host = Config#aws_config.s3_host,+            Path = lists:flatten(["/", BucketName, Key0]),+            {Host, Path, lists:flatten([Config#aws_config.s3_scheme, Host, port_spec(Config), Path])}+    end.++-spec signature(aws_config(), string(), string(), string(), atom(), proplist(), proplist(), string()) -> string().+signature(Config, Path, Date, Region, Method, QueryParams, Headers, Payload) ->+  Service = "s3",+  CredentialScope = erlcloud_aws:credential_scope(Date, Region, Service),+  {CanonicalRequest, SignedHeaders} = erlcloud_aws:canonical_request(Method, Path, QueryParams, Headers, Payload),+  ToSign = erlcloud_aws:to_sign(Date, CredentialScope, CanonicalRequest),+  SigningKey = erlcloud_aws:signing_key(Config, Date, Region, Service),+  [Result] = erlcloud_aws:base16(erlcloud_util:sha256_mac(SigningKey, ToSign)),+  Result.

I don't see many tests for this function. It might be nice to generate some pre-signed URLs with another tool and then use the results of that as test data that we could use to validate this function.

lbakerchef

comment created time in 4 days

Pull request review commentchef/erlcloud

Presigned URLs

 sign_v4_headers(Config, Headers, Payload, Region, Service) -> -spec sign_v4(atom(), list(), aws_config(), headers(), string() | binary(), string(), string(), list()) -> headers(). sign_v4(Method, Uri, Config, Headers, Payload, Region, Service, QueryParams) ->     Date = iso_8601_basic_time(),-    {PayloadHash, Headers1} =-        sign_v4_content_sha256_header( [{"x-amz-date", Date} | Headers], Payload ),-    Headers2 = case Config#aws_config.security_token of-                   undefined -> Headers1;-                   Token -> [{"x-amz-security-token", Token} | Headers1]+    sign_v4(Method, Uri, Config, Headers, Payload, Region, Service, QueryParams, Date).++-spec sign_v4(atom(), list(), aws_config(), headers(), string() | binary(), string(), string(), list(), string()) -> headers().+sign_v4(Method, Uri, Config, Headers0, Payload, Region, Service, QueryParams, Date0) ->+    % use passed-in x-amz-date header or create one+    Headers1 =+        case proplists:get_value("x-amz-date", Headers0) of+            undefined ->+                Date = Date0,

Is this unused?

lbakerchef

comment created time in 4 days

Pull request review commentchef/erlcloud

Presigned URLs

 sign_v4_headers(Config, Headers, Payload, Region, Service) -> -spec sign_v4(atom(), list(), aws_config(), headers(), string() | binary(), string(), string(), list()) -> headers(). sign_v4(Method, Uri, Config, Headers, Payload, Region, Service, QueryParams) ->     Date = iso_8601_basic_time(),-    {PayloadHash, Headers1} =-        sign_v4_content_sha256_header( [{"x-amz-date", Date} | Headers], Payload ),-    Headers2 = case Config#aws_config.security_token of-                   undefined -> Headers1;-                   Token -> [{"x-amz-security-token", Token} | Headers1]+    sign_v4(Method, Uri, Config, Headers, Payload, Region, Service, QueryParams, Date).++-spec sign_v4(atom(), list(), aws_config(), headers(), string() | binary(), string(), string(), list(), string()) -> headers().+sign_v4(Method, Uri, Config, Headers0, Payload, Region, Service, QueryParams, Date0) ->+    % use passed-in x-amz-date header or create one

I find the semantics of this a little strange in that we are passing an explicit date but the x-amz-date command overrides this. I assume this is because of the double-duty this is doing for verifying our signatures (which will likely have the date header coming in via Headers) and generating new signatures (which I am assuming won't have the date header coming in via Headers)

lbakerchef

comment created time in 4 days

PullRequestReviewEvent
PullRequestReviewEvent

fork stevendanna/go.uuid

UUID package for Go

fork in 5 days

PullRequestReviewEvent
PullRequestReviewEvent
PullRequestReviewEvent

Pull request review commentchef/automate

Updating current ssl cipher list

 Uncomment and change settings as needed, and then run `chef-automate config patc # large_client_header_buffers_size = "8k" # large_client_header_buffers_number = 4 # sendfile = "on"-# ssl_ciphers = "ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:AES256-GCM-SHA384:!aNULL:!eNULL:!EXPORT"+# ssl_ciphers = "ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:!aNULL:!eNULL:!EXPORT"

I think AES256-GCM-SHA384 should still be in this list.

jmassardo

comment created time in 8 days

PullRequestReviewEvent
GollumEvent

push eventchef/automate

Steven Danna

commit sha cb7366427c9e29d6a27b550d8f3321ba40b21b67

automate-gateway: use correct swagger configuration (#4408) This got reset by re-vendoring the dependency. Signed-off-by: Steven Danna <steve@chef.io>

view details

push time in 10 days

delete branch chef/automate

delete branch : ssd/swagger-config

delete time in 10 days

PR merged chef/automate

automate-gateway: use correct swagger configuration

This got reset by re-vendoring the dependency.

+1 -1

2 comments

1 changed file

stevendanna

pr closed time in 10 days

pull request commentchef/automate

automate-gateway: use correct swagger configuration

At the moment these swagger docs do seem broken unfortunately.

stevendanna

comment created time in 10 days

PR opened chef/automate

automate-gateway: use correct swagger configuration

This got reset by re-vendoring the dependency.

+1 -1

0 comment

1 changed file

pr created time in 10 days

create branch chef/automate

branch : ssd/swagger-config

created branch time in 10 days

push eventchef/chef-server

Steven Danna

commit sha 4693c633ae72c766a1ad4aba6873d4b866d5a943

Upgrade to Elasticsearch 6.8.12 Fixes #2140 Signed-off-by: Steven Danna <steve@chef.io>

view details

Steven Danna

commit sha 35db6b46c63330a6b728ed80623adda54e76fcdd

Merge pull request #2142 from chef/ssd/bump-es Upgrade to Elasticsearch 6.8.12

view details

push time in 11 days

PR merged chef/chef-server

Upgrade to Elasticsearch 6.8.12

Fixes #2140

Signed-off-by: Steven Danna steve@chef.io

+4 -4

0 comment

1 changed file

stevendanna

pr closed time in 11 days

issue closedchef/chef-server

Incorrect license in elasticsearch omnibus software definition

The Omnibus software definition for Elasticsearch states that it's Apache 2.0 licensed but the source URL is linking to the default distribution which contains code under the Elastic License.

The OSS-only version can be downloaded from https://www.elastic.co/downloads/past-releases/elasticsearch-oss-6-8-1.

Also note that this is release is missing quite a few security patches since the latest in the 6.8 branch is 6.8.12.

closed time in 11 days

smith

push eventchef/automate

Steven Danna

commit sha ea100bb080730b3ef08f081c84211abc6c38900f

automate-elasticsearch update to 6.8.12 (#4405) The complete list of CVEs this will address is hard to compile as many of the issues are in libraries used inside ES or the s3-repository plugin we download. For example, the jackson-databind dependency, was at version 2.8.11.3 previously: /hab/pkgs/chef/automate-elasticsearch/6.8.3/20201015010521/es/modules/ingest-geoip/jackson-databind-2.8.11.3.jar /hab/pkgs/chef/automate-elasticsearch/6.8.3/20201015010521/es/plugins/repository-s3/jackson-databind-2.8.11.3.jar and should now be at 2.8.11.6 /hab/pkgs/chef/automate-elasticsearch/6.8.12/20201016121850/es/modules/ingest-geoip/jackson-databind-2.8.11.6.jar /hab/pkgs/chef/automate-elasticsearch/6.8.12/20201016121850/es/plugins/repository-s3/jackson-databind-2.8.11.6.jar The changelog for this library claims the following CVEs have been addressed: https://github.com/FasterXML/jackson-databind/blob/2.8/release-notes/VERSION CVE-2020-9546 CVE-2020-9547 CVE-2020-9548 CVE-2019-14540 CVE-2019-14439 CVE-2019-16335 CVE-2019-17267 CVE-2019-16942 CVE-2019-16943 CVE-2019-17531 CVE-2019-20330 CVE-2020-8840 CVE-2019-12384 CVE-2019-12814 CVE-2019-14379 CVE-2019-14439 Signed-off-by: Steven Danna <steve@chef.io>

view details

push time in 11 days

delete branch chef/automate

delete branch : ssd/bump-elasticsearch

delete time in 11 days

PR merged chef/automate

automate-elasticsearch update to 6.8.12

The complete list of CVEs this will address is hard to compile as many of the issues are in libraries used inside ES or the s3-repository plugin we download.

For example, the jackson-databind dependency, was at version 2.8.11.3 previously:

/hab/pkgs/chef/automate-elasticsearch/6.8.3/20201015010521/es/modules/ingest-geoip/jackson-databind-2.8.11.3.jar
/hab/pkgs/chef/automate-elasticsearch/6.8.3/20201015010521/es/plugins/repository-s3/jackson-databind-2.8.11.3.jar

and should now be at 2.8.11.6

/hab/pkgs/chef/automate-elasticsearch/6.8.12/20201016121850/es/modules/ingest-geoip/jackson-databind-2.8.11.6.jar
/hab/pkgs/chef/automate-elasticsearch/6.8.12/20201016121850/es/plugins/repository-s3/jackson-databind-2.8.11.6.jar

The changelog for this library claims the following CVEs have been addressed:

https://github.com/FasterXML/jackson-databind/blob/2.8/release-notes/VERSION

CVE-2020-9546 CVE-2020-9547 CVE-2020-9548 CVE-2019-14540 CVE-2019-14439 CVE-2019-16335 CVE-2019-17267 CVE-2019-16942 CVE-2019-16943 CVE-2019-17531 CVE-2019-20330 CVE-2020-8840 CVE-2019-12384 CVE-2019-12814 CVE-2019-14379 CVE-2019-14439

Signed-off-by: Steven Danna steve@chef.io

+3 -3

1 comment

2 changed files

stevendanna

pr closed time in 11 days

push eventchef/automate

Steven Danna

commit sha 760fcae8e05ba74513adf0160b360511eae22ca4

automate-gateway: update swagger-ui to 3.35.2 (#4406) Signed-off-by: Steven Danna <steve@chef.io>

view details

push time in 11 days

delete branch chef/automate

delete branch : ssd/update-swagger-ui

delete time in 11 days

PR merged chef/automate

automate-gateway: update swagger-ui to 3.35.2

Signed-off-by: Steven Danna steve@chef.io

+103 -204

3 comments

17 changed files

stevendanna

pr closed time in 11 days

pull request commentchef/automate

automate-gateway: update swagger-ui to 3.35.2

@jaym I was able to navigate to it after this change still...any pointer to why we think it doesn't work?

stevendanna

comment created time in 11 days

PR opened chef/automate

automate-gateway: update swagger-ui to 3.35.2

Signed-off-by: Steven Danna steve@chef.io

+103 -204

0 comment

17 changed files

pr created time in 14 days

create branch chef/automate

branch : ssd/update-swagger-ui

created branch time in 14 days

Pull request review commentchef/automate

automate-elasticsearch update to 6.8.12

 def parse_products_meta_file(path)   # while still allowing the clients of these databases to upgrade their client   # libraries if any fixes are shipped there.   "automate-postgresql"    => {"origin" => "chef", "name" => "automate-postgresql",    "version" => "9.6.11", "release" => "20200929122522"},-  "automate-elasticsearch" => {"origin" => "chef", "name" => "automate-elasticsearch", "version" => "6.8.3",  "release" => "20200929123629"},+  # "automate-elasticsearch" => {"origin" => "chef", "name" => "automate-elasticsearch", "version" => "6.8.3",  "release" => "20200929123629"},

This needs to unpin to actually test the update. We can re-pin it once the new package version hits the dev channel.

stevendanna

comment created time in 14 days

PullRequestReviewEvent

PR opened chef/automate

automate-elasticsearch update to 6.8.12

The complete list of CVEs this will address is hard to compile as many of the issues are in libraries used inside ES or the s3-repository plugin we download.

For example, the jackson-databind dependency, was at version 2.8.11.3 previously:

/hab/pkgs/chef/automate-elasticsearch/6.8.3/20201015010521/es/modules/ingest-geoip/jackson-databind-2.8.11.3.jar
/hab/pkgs/chef/automate-elasticsearch/6.8.3/20201015010521/es/plugins/repository-s3/jackson-databind-2.8.11.3.jar

and should now be at 2.8.11.6

/hab/pkgs/chef/automate-elasticsearch/6.8.12/20201016121850/es/modules/ingest-geoip/jackson-databind-2.8.11.6.jar /hab/pkgs/chef/automate-elasticsearch/6.8.12/20201016121850/es/plugins/repository-s3/jackson-databind-2.8.11.6.jar

The changelog for this library claims the following CVEs have been addressed:

https://github.com/FasterXML/jackson-databind/blob/2.8/release-notes/VERSION

CVE-2020-9546 CVE-2020-9547 CVE-2020-9548 CVE-2019-14540 CVE-2019-14439 CVE-2019-16335 CVE-2019-17267 CVE-2019-16942 CVE-2019-16943 CVE-2019-17531 CVE-2019-20330 CVE-2020-8840 CVE-2019-12384 CVE-2019-12814 CVE-2019-14379 CVE-2019-14439

Signed-off-by: Steven Danna steve@chef.io

+3 -3

0 comment

2 changed files

pr created time in 14 days

create branch chef/automate

branch : ssd/bump-elasticsearch

created branch time in 14 days

push eventchef/chef-server

Steven Danna

commit sha 129abb7ffe46f9911056ae51aef0303a10055620

cookbooks: set explicit TMPDIR for elasticsearch Elasticsearch requires a temporary directory on which it can create executable files to support the execution of native code via JNA. Many of our customers have /tmp mounted noexec by default causing problems. This configures elasticsearch to use a custom tmp directory. Similar changes have previously been made in chef-backend and Chef Automate Signed-off-by: Steven Danna <steve@chef.io>

view details

Steven Danna

commit sha 6d0759a65e72147314a70af32e10761c8dc8a18f

Merge pull request #2146 from chef/ssd/more-tempdir cookbooks: set explicit TMPDIR for elasticsearch

view details

push time in 15 days

PR merged chef/chef-server

cookbooks: set explicit TMPDIR for elasticsearch

Elasticsearch requires a temporary directory on which it can create executable files to support the execution of native code via JNA.

Many of our customers have /tmp mounted noexec by default causing problems.

This configures elasticsearch to use a custom tmp directory.

Similar changes have previously been made in chef-backend and Chef Automate

Signed-off-by: Steven Danna steve@chef.io

+4 -0

0 comment

1 changed file

stevendanna

pr closed time in 15 days

PR opened chef/chef-server

cookbooks: set explicit TMPDIR for elasticsearch

Elasticsearch requires a temporary directory on which it can create executable files to support the execution of native code via JNA.

Many of our customers have /tmp mounted noexec by default causing problems.

This configures elasticsearch to use a custom tmp directory.

Similar changes have previously been made in chef-backend and Chef Automate

Signed-off-by: Steven Danna steve@chef.io

+4 -0

0 comment

1 changed file

pr created time in 15 days

create branch chef/chef-server

branch : ssd/more-tempdir

created branch time in 15 days

push eventchef/chef-server

Steven Danna

commit sha 403e83a2f8e277a6d41b651444d43d06c0d77db3

cookbooks: remove reference to ElasticsearchPreflightValidator Signed-off-by: Steven Danna <steve@chef.io>

view details

Steven Danna

commit sha 346e68e2b508a61c8a2121b397c90224b1810bf0

Merge pull request #2145 from chef/ssd/remove-unused-code cookbooks: remove reference to ElasticsearchPreflightValidator

view details

push time in 16 days

PR merged chef/chef-server

cookbooks: remove reference to ElasticsearchPreflightValidator

This class is no more.

Signed-off-by: Steven Danna steve@chef.io

+0 -3

0 comment

1 changed file

stevendanna

pr closed time in 16 days

PR opened chef/chef-server

cookbooks: remove reference to ElasticsearchPreflightValidator

This class is no more.

Signed-off-by: Steven Danna steve@chef.io

+0 -3

0 comment

1 changed file

pr created time in 16 days

create branch chef/chef-server

branch : ssd/remove-unused-code

created branch time in 16 days

push eventchef/automate

Steven Danna

commit sha 09f8e51a26128494796be7d0f9a30340c7f618a9

Wire sample-data-service into deployment We still need configuration protobufs for this service. Signed-off-by: Steven Danna <steve@chef.io>

view details

push time in 17 days

Pull request review commentchef/automate

[WIP] sample-data-service mock infra client data

       "type": "product",       "dependencies": ["automate"],       "services": []+    },+    {+      "name": "sample-data",+      "type": "product",+      "dependencies": ["automate"],+      "services": []

I think we'll want to list the new services in this service array as chef/sample-data-service

vsingh-msys

comment created time in 17 days

PullRequestReviewEvent

pull request commentchef/chef-server

maintainers: remove Ryan Cragun

@ryancragun No worries if not, but if you would like feel free to move yourself to the "Past Maintainers" section.

ryancragun

comment created time in 18 days

pull request commentchef/chef-server

maintainers: remove Ryan Cragun

@srenatus Updated

ryancragun

comment created time in 18 days

push eventstevendanna/stevendanna.github.com

Steven Danna

commit sha c8492f21fb801854526d542742627bebd1a69f48

add resume Signed-off-by: Steven Danna <steve@chef.io>

view details

push time in 22 days

issue commentchef/chef-server

Incorrect license in elasticsearch omnibus software definition

Thanks @smith! I've opened a pull request to fix this which is running through our test pipeline now.

smith

comment created time in 22 days

PR opened chef/chef-server

Upgrade to Elasticsearch 6.8.12

Fixes #2140

Signed-off-by: Steven Danna steve@chef.io

+4 -4

0 comment

1 changed file

pr created time in 22 days

push eventchef/chef-server

Steven Danna

commit sha 4693c633ae72c766a1ad4aba6873d4b866d5a943

Upgrade to Elasticsearch 6.8.12 Fixes #2140 Signed-off-by: Steven Danna <steve@chef.io>

view details

push time in 23 days

create branch chef/chef-server

branch : ssd/bump-es

created branch time in 23 days

PR opened chef/automate

Clarify use of "secrets-helper"

This tool is unfortunately named.

Signed-off-by: Steven Danna steve@chef.io

+7 -3

0 comment

1 changed file

pr created time in 23 days

create branch chef/automate

branch : ssd/clarify-a-thing

created branch time in 23 days

push eventchef/license_scout

Steven Danna

commit sha 37ddb4ccf58a7ba84ca9b57e82d0783597390877

Add license for parser gem Signed-off-by: Steven Danna <steve@chef.io>

view details

Steven Danna

commit sha 6cd6098529466cc2e9700a9a5cb988905d518e06

Merge pull request #242 from chef/ssd/parser-license Add license for parser gem

view details

push time in 23 days

PR merged chef/license_scout

Add license for parser gem

Signed-off-by: Steven Danna steve@chef.io

+1 -0

0 comment

1 changed file

stevendanna

pr closed time in 23 days

PR opened chef/license_scout

Add license for parser gem

Signed-off-by: Steven Danna steve@chef.io

+1 -0

0 comment

1 changed file

pr created time in 23 days

create branch chef/license_scout

branch : ssd/parser-license

created branch time in 23 days

push eventchef/chef-server

Steven Danna

commit sha c55c19135e0714362f31702e24a1661d816fc881

cookbooks: fix bug in heap_size validator Signed-off-by: Steven Danna <steve@chef.io>

view details

Steven Danna

commit sha 69922eb490eeb5a037b1022331b40851fb51f627

Merge pull request #2138 from chef/ssd/heap-size-warning cookbooks: fix bug in heap_size validator

view details

push time in 24 days

PR merged chef/chef-server

cookbooks: fix bug in heap_size validator

Signed-off-by: Steven Danna steve@chef.io

+2 -2

0 comment

1 changed file

stevendanna

pr closed time in 24 days

PR opened chef/chef-server

cookbooks: fix bug in heap_size validator

Signed-off-by: Steven Danna steve@chef.io

+2 -2

0 comment

1 changed file

pr created time in 24 days

create branch chef/chef-server

branch : ssd/heap-size-warning

created branch time in 24 days

push eventchef/automate

Steven Danna

commit sha e099b711c67ea6f09abcb71fd700cee09caccbfe

ci: try fixing the path to the working dir Signed-off-by: Steven Danna <steve@chef.io>

view details

push time in 25 days

push eventchef/automate

Steven Danna

commit sha 4c5440f27043635ad6e121fd36a68443e50f9fc0

tests: don't check cert validity period for internal CA (#4391) We already put in a filter to ignore this check in 3a2e03b6e9e57bae4cd6fcf12c38fb153453cc11 but it appears that drwetter/testssl.sh#1741 changed the name of the check. Signed-off-by: Steven Danna <steve@chef.io>

view details

push time in 25 days

delete branch chef/automate

delete branch : ssd/sslcheck-fix

delete time in 25 days

PR merged chef/automate

tests: don't check cert validity period for internal CA

We already put in a filter to ignore this check in 3a2e03b6e9e57bae4cd6fcf12c38fb153453cc11 but it appears that drwetter/testssl.sh#1741 changed the name of the check.

Signed-off-by: Steven Danna steve@chef.io

+2 -0

1 comment

1 changed file

stevendanna

pr closed time in 25 days

push eventchef/automate

Steven Danna

commit sha b8b52a2a210d7d24ee6b5e2a03a0d80e9d2cb7bd

tests: don't check cert validity period for internal CA We already put in a filter to ignore this check in 3a2e03b6e9e57bae4cd6fcf12c38fb153453cc11 but it appears that drwetter/testssl.sh#1741 changed the name of the check. Signed-off-by: Steven Danna <steve@chef.io>

view details

push time in 25 days

Pull request review commenthabitat-sh/core-plans

[dex] bump v2.24.0 -> v2.25.0, simplify plan.sh

 pkg_deps=(core/glibc) pkg_build_deps=(core/go core/git core/gcc) pkg_bin_dirs=(bin) -do_before() {-  GOPATH=$HAB_CACHE_SRC_PATH/$pkg_dirname-  export GOPATH-}--do_prepare() {-  export GO_LDFLAGS="-w -X $gopkg/version.Version=v$pkg_version"-}--do_download() {-  return 0-}--do_verify() {-  return 0-}--# Use unpack instead of download, so that plan-build can manage the-# source path. This ensures us a clean checkout every time we build.-do_unpack() {-  git clone "$pkg_source" "$GOPATH/src/$gopkg"-  ( cd "$GOPATH/src/$gopkg" || exit-    git reset --hard "v$pkg_version"-  )-}- do_build() {-  go build --ldflags "${GO_LDFLAGS}" -o "$pkg_prefix/bin/dex" "$gopkg/cmd/dex"+  cat >scripts/git-version <<EOF+#/bin/sh

Should this be #!? My guess is it works regardless, but probably best for clarity.

srenatus

comment created time in 25 days

PullRequestReviewEvent

PR opened chef/automate

tests: don't check cert validity period for internal CA

We already put in a filter to ignore this check in 3a2e03b6e9e57bae4cd6fcf12c38fb153453cc11 but it appears that drwetter/testssl.sh#1741 changed the name of the check.

Signed-off-by: Steven Danna steve@chef.io

+1 -0

0 comment

1 changed file

pr created time in 25 days

create branch chef/automate

branch : ssd/sslcheck-fix

created branch time in 25 days

issue closedchef/automate

automate-workflow cleanup tasks

User Story

The automate-workflow code base was imported as-is from the old Chef Automate 1 code base. This feature is provided for users upgrading from Chef Automate 1 who have already invested in workflow. While we do not expect to do any large-scale feature-work in this code base, it is likely that we have to do ongoing maintenance for this feature until there are no supported users of it.

Towards that end, we should cleanup loose ends that might confuse users and future developers or make maintenance harder.

New Features

  • [ ] HTTP communications between workflow and authn: Workflow communicates with Authn over HTTP rather than HTTPS. This is currently mitigated by it being a localhost-only communication, but now that the code base is imported it should be easier to support HTTPS.

Cleanup Opportunities

  • [x] Remove dangerous or non-functional automate-ctl commands. (In progress: https://github.com/chef/automate/pull/59)

  • [ ] Remove unused modules from the workflow-server codebase

    • [x] Insights (https://github.com/chef/automate/pull/57)
    • [x] Visibility (https://github.com/chef/automate/pull/57)
    • [ ] Notifications (maybe? I haven't investigated whether we can)
  • [ ] Investigate unused code from the workflow-web codebase. I'm not familiar with this project to know if there is unused code in what we imported.

  • [x] Removed unused code from workflow-nginx: https://github.com/chef/automate/pull/80

Test Integration

On merge we added some very basic tests, but there are more tests in the workflow codebases that haven't been hooked up into CI and/or that could be extended.

  • [ ] Extend workflow integration test to include creating a pipeline
  • [ ] Extend workflow integration test to include creating a runner
  • [ ] Unit tests for automate-workflow-ctl
  • [ ] Integration tests for automate-workflow-ctl
  • [ ] Unit tests for automate-workflow-web
  • [ ] e2e tests for automate-workflow-web
  • [ ] schema tests for automate-workflow-server
  • [ ] dialyzer for automate-workflow-server

closed time in a month

stevendanna

issue commentchef/automate

automate-workflow cleanup tasks

Given that the plan is to remove workflow from automate in a few months, I think we can close this as the work is incredibly unlikely to happen.

stevendanna

comment created time in a month

issue closedchef/chef-server

Rabbitmq fails to restart on first reconfigure

We have an intermittent failure on the first run of chef-server-ctl. Rerunning always seems to fix the problem. The failure is late enough in the first run that we're just running triggered restarts, and all configuration is done.

Chef Server Version

Chef Server 13.0.14

Platform Details

Ubuntu 16.04, in vagrant/virtualbox (also seems to intermittently repro on AWS m4.large)

Configuration

Standalone, & Tiered, HA. New install or upgrade.

Scenario:

Install chef server

Steps to Reproduce:

dpkg -i chef-server-core_13.0.14-1_amd64.deb
chef-server-ctl reconfigure --verbose --chef-license accept

Expected Result:

chef-server-ctl to succeed

Actual Result:

---- Begin output of /opt/opscode/embedded/bin/sv restart /opt/opscode/service/rabbitmq ----

STDOUT: timeout: run: /opt/opscode/service/rabbitmq: (pid 7674) 68s

Full dump here (this is from a dev-vm but we can reproduce elsewhere) https://gist.github.com/markan/ac0dbcf8ed9a9fab968868fbbff9bfcd

closed time in a month

markan

issue closedchef/chef-server

Status endpoint intermittently failing on upgrade to Chef Infra Server 13.1.13

Creating an issue from https://discourse.chef.io/t/status-endpoint-intermittently-failing-after-upgrading-to-chef-server-core-13-1-13-1/16585

Chef Server Version

13.1.13

Configuration

Standalone

Actual Result:

We have a health check monitor that hits our chef server's /_status endpoint every minute. Once I upgraded from chef-server-core 13.0.17 to 13.1.13 last week, this endpoint has been intermittently failing about once or twice a day with a 500 response code and the following response:

{"status":"fail","upstreams":{"chef_solr":"fail","chef_sql":"pong","chef_index":"pong","oc_chef_action":"pong","oc_chef_authz":"pong"},"keygen":{"keys":10,"max":10,"max_workers":1,"cur_max_workers":1,"inflight":0,"avail_workers":1,"start_size":0},"indexing":{"mode":"rabbitmq","indexer_message_queue_length":0},"analytics_queue":{"queue_at_capacity":false,"dropped_since_last_check":0,"max_length":10000,"last_recorded_length":0,"total_dropped":0,"check_count":2959,"mailbox_length":0}}

This chef server is just a single chef server running on a t3.medium in ec2 (averages about 10% cpu, 45% memory, so I don't believe it's overloaded). The server is using all the default settings except for setting a custom SSL certificate for nginx. It's been running for a little over 2 years without any issues and I haven't made any changes to our environment except for upgrading the chef-server-core package. Even though the /_status endpoint is intermittently failing, the rest of the server appears to be working fine and I'm not seeing any issues when running chef-client.

The response seems to indicate some issue with solr, however I'm really struggling to find any problems with solr. I've scoured all of the logs in /var/log/opscode and can't find any indication of any problem in our solr logs. The erchef crash logs contain the following text but nothing else:

2020-01-26 13:09:32 =ERROR REPORT====
{<<"method=GET; path=/_status; status=500; ">>,"Internal Server Error"}

and the erchef current log contains the following which also isn't super helpful:

2020-01-26_13:09:32.55911 [error] /_status
2020-01-26_13:09:32.55912 {{status,fail},{upstreams,{[{<<"chef_solr">>,<<"fail">>},{<<"chef_sql">>,<<"pong">>},{<<"chef_index">>,<<"pong">>},{<<"oc_chef_action">>,<<"pong">>},{<<"oc_chef_authz">>,<<"pong">>}]}},{<<"analytics_queue">>,{[{queue_at_capacity,false},{dropped_since_last_check,0},{max_length,10000},{last_recorded_length,0},{total_dropped,0},{check_count,2959},{mailbox_length,0}]}}}
2020-01-26_13:09:32.56091 [error] {<<"method=GET; path=/_status; status=500; ">>,"Internal Server Error"}

I also tried looking through every other possible log in the /var/log/opscode folder during the time of these failures and haven't been able to find anything interesting.

I started looking through the chef-server code to see what the /_status endpoint is actually doing. It appears that to check chef_solr's health, this endpoint makes an http request to the following endpoint: http://127.0.0.1:8983/solr/admin/ping?wt=json

So I setup a tcpdump to see all traffic to 127.0.0.1:8983 to see if I could catch what response solr was returning to the /_status endpoint. I was able to successfully see all of the ping requests coming from the /_status endpoint to solr's ping endpoint and was able to see the response that solr returned. I then waited until the /_status endpoint returned a 500 error again. I then checked the tcpdump logs and found that during the failure, the /_status endpoint did not send any request to the solr ping endpoint at all. So it seems like there is some issue where a small amount of time the /_status endpoint is marking chef_solr as failed but not actually sending any ping request to solr. Additionally, I also setup a script that runs every minute and records the results of http://127.0.0.1:8983/solr/admin/ping?wt=json to confirm that there are no issues affecting solr. I ran this script on cron every minute for days and the solr ping endpoint never failed once. It seems like the issue has something to do with the erchef /_status endpoint, but I'm not sure what.

I also saw in the code for the /_status endpoint that it has a 400ms timeout, but when our _status endpoint fails, it's often failing within 40-50ms, so I don't think that is the issue. At any rate, I tried increasing opscode_erchef['health_ping_timeout'] to 1000, but it hasn't helped. I also tried looking through the release notes for 13.1.13, but didn't really see anything obvious that could be a culprit. However, I'm fairly convinced it has something to do with a change in this release. I tried reverting our server back to 13.0.17 from a previous snapshot and the error disappeared. I've tried the upgrade from 13.0.17 -> 13.1.13 twice now and both times this error appears shortly after the upgrade.

I've also tried obvious things like restarting the chef server, rebooting the server, etc, but that has not helped.

If anyone has any ideas on how to troubleshoot this further or might have any ideas on what's causing this, it would be appreciated. For now, I'm just making our health check less sensitive.

closed time in a month

PrajaktaPurohit

issue commentchef/chef-server

Status endpoint intermittently failing on upgrade to Chef Infra Server 13.1.13

We believe this should be solved in 82ef9c49ed1f83a3cc8fc175db7ebf2e2b6697c6

PrajaktaPurohit

comment created time in a month

issue closedchef/chef-server

Rabbitmq var/ directory misconfigured during 12.4.1 install

After a chef-server 12.4.1 install on a clean ubuntu 14.04 via the chef-ingredient cookbook, rabbitmq failed to start. Its runit log revealed:

2016-03-29_13:38:55.90528 mkdir: cannot create directory '/opt/opscode/embedded/service/rabbitmq/sbin/../var': Permission denied

I think it's just a missing symlink, so as a workaround, I did:

ln -s /var/opt/opscode/rabbitmq /opt/opscode/embedded/service/rabbitmq/var

rmq then started fine.

closed time in a month

clintoncwolfe

issue commentchef/chef-server

Rabbitmq var/ directory misconfigured during 12.4.1 install

RabbitMQ is no longer included in Chef Infra Server.

clintoncwolfe

comment created time in a month

issue closedchef/chef-server

Chef Infra Server search (Solr) uses supported/secure software (Elasticsearch), resolving open CVEs

Replaces solr with Elasticsearch to remove unsupported solr package that contains security vulnerabilities.

Operationally brings Chef Infra Server users toward migrating to the Automate Infra API (automate uses elasticsearch)

Aha! Link: https://chef.aha.io/epics/CHEF-E-2

closed time in a month

btm

issue closedchef/chef-server

8. Update to Erlang 22

Once the Chef Infra Server is upgraded to Erlang 20, and the LDAP bug in #1642 is tested, proceed with updating the dependencies to the latest versions and update Erlang to version 22.

[ ] Update the dependencies with rebar3 to the latest.

[ ] Change software definition at https://github.com/chef/omnibus-software/blob/master/config/software/erlang.rb with version 22 of erlang.

[ ] Update https://github.com/chef/chef-server/blob/master/omnibus_overrides.rb#L5 to the above updated version.

[ ] Make the necessary code changes to oc_erchef, oc_bifrost, bookshelf.

[ ] Update the buildkite pipeline to test with Erlang 22.

Acceptance Criteria: [ ] Verify pipeline passes with erlang 22. [ ] Adhoc version of Chef-server builds with Erlang 22.

closed time in a month

PrajaktaPurohit

issue commentchef/chef-server

8. Update to Erlang 22

Done!

PrajaktaPurohit

comment created time in a month

PullRequestReviewEvent
more